# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .context import lux
import pytest
import random
import pandas as pd
import warnings
# Suite of tests that check whether data_type is inferred correctly by Lux
def test_check_cars():
lux.config.set_SQL_connection("")
df = pd.read_csv("lux/data/car.csv")
df.maintain_metadata()
assert df.data_type["Name"] == "nominal"
assert df.data_type["MilesPerGal"] == "quantitative"
assert df.data_type["Cylinders"] == "nominal"
assert df.data_type["Displacement"] == "quantitative"
assert df.data_type["Horsepower"] == "quantitative"
assert df.data_type["Weight"] == "quantitative"
assert df.data_type["Acceleration"] == "quantitative"
assert df.data_type["Year"] == "temporal"
assert df.data_type["Origin"] == "nominal"
def test_check_int_id():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/instacart_sample.csv?raw=true"
)
df._ipython_display_()
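    # invert_data_type flips the column -> type mapping into type -> columns;
    # the three integer columns flagged as IDs should all land under "id".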
inverted_data_type = lux.config.executor.invert_data_type(df.data_type)
assert len(inverted_data_type["id"]) == 3
assert (
"<code>order_id</code>, <code>product_id</code>, <code>user_id</code> is not visualized since it resembles an ID field."
in df._message.to_html()
)
def test_check_str_id():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/churn.csv?raw=true")
df._ipython_display_()
assert (
"<code>customerID</code> is not visualized since it resembles an ID field.</li>"
in df._message.to_html()
)
def test_check_hpi():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/hpi.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"HPIRank": "quantitative",
"Country": "geographical",
"SubRegion": "nominal",
"AverageLifeExpectancy": "quantitative",
"AverageWellBeing": "quantitative",
"HappyLifeYears": "quantitative",
"Footprint": "quantitative",
"InequalityOfOutcomes": "quantitative",
"InequalityAdjustedLifeExpectancy": "quantitative",
"InequalityAdjustedWellbeing": "quantitative",
"HappyPlanetIndex": "quantitative",
"GDPPerCapita": "quantitative",
"Population": "quantitative",
}
def test_check_airbnb():
    df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/airbnb_nyc.csv?raw=true")
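    # A plausible continuation sketch, mirroring test_check_cars above; the column
    # names are assumptions about the airbnb_nyc dataset and are not confirmed here:
    # df.maintain_metadata()
    # assert df.data_type["neighbourhood"] == "nominal"
    # assert df.data_type["price"] == "quantitative"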
from random import randint
import pandas as pd
import pytest
import janitor # noqa: F401
import janitor.timeseries # noqa: F401
@pytest.fixture
def timeseries_dataframe() -> pd.DataFrame:
"""
Returns a time series dataframe
"""
    ts_index = pd.date_range("1/1/2019", periods=1000, freq="1H")
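    # A minimal fixture body sketch, assuming any frame on the hourly index satisfies
    # the docstring; the "value" column name and the randint range are illustrative.
    values = [randint(0, 100) for _ in range(1000)]
    return pd.DataFrame({"value": values}, index=ts_index)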
# Copyright 2017-present, Bill & <NAME> Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from toolz.itertoolz import groupby
import pandas as pd
import pytest
from ..dataframes import (
get_counts_df,
get_counts_by_domain,
pivot_counts_df,
get_variable_counts,
get_variable_count_by_variable
)
from ..models import (
Study,
Variable,
)
from .factories import (
AgeVariableFactory,
CountFactory,
QualifierVariableFactory,
StudyFactory,
VariableFactory,
)
@pytest.mark.django_db
def test_get_counts_df():
age_var = AgeVariableFactory()
var_var = VariableFactory(domain__code="FOO", domain__label="foo")
qual_var = QualifierVariableFactory(domain__code="BAR", domain__label="bar")
study_1 = StudyFactory()
study_2 = StudyFactory()
CountFactory(codes=[var_var], study=study_2, count=21)
CountFactory(codes=[age_var, var_var], study=study_1, count=22)
CountFactory(codes=[age_var, var_var, qual_var], study=study_1, count=23)
studies = Study.objects.all()
df = get_counts_df(studies=studies)
# id study study_label count domain_code domain_label codes
# 0 1 2 ID#1 21 FOO foo 2
# 1 2 1 ID#0 22 AGECAT Age 1
# 2 2 1 ID#0 22 FOO foo 2
# 3 3 1 ID#0 23 AGECAT Age 1
# 4 3 1 ID#0 23 FOO foo 2
# 5 3 1 ID#0 23 BAR bar 3
assert set(df.columns) == set(['id', 'study', 'study_label', 'count', 'domain_code', 'domain_label', 'codes', 'subjects'])
assert len(df) == 6
@pytest.mark.django_db
def test_get_counts_df_no_counts_for_studies():
StudyFactory.create_batch(2)
studies = Study.objects.all()
df = get_counts_df(studies=studies)
# id study study_label count domain_code domain_label codes
assert set(df.columns) == set(['id', 'study', 'study_label', 'count', 'domain_code', 'domain_label', 'codes', 'subjects'])
assert len(df) == 0
@pytest.fixture
@pytest.mark.django_db
def test_df():
age_var = AgeVariableFactory(label='age')
var_var = VariableFactory(domain__code="FOO", domain__label="foo", label="var")
qual_var = QualifierVariableFactory(domain__code="BAR", domain__label="bar")
study_1 = StudyFactory()
study_2 = StudyFactory()
CountFactory(codes=[var_var], study=study_2, count=21)
CountFactory(codes=[age_var, var_var], study=study_1, count=22)
CountFactory(codes=[age_var, var_var, qual_var], study=study_1, count=23)
df = get_counts_df(studies=Study.objects.all())
return df
@pytest.fixture
def pivot_df(test_df):
return pivot_counts_df(test_df)
@pytest.mark.django_db
def test_get_counts_by_domain(test_df):
df = get_counts_by_domain(test_df)
# study study_label domain_code count domain_label
# 0 5 ID#4 AGECAT 45 Age
# 1 5 ID#4 BAR 23 bar
# 2 5 ID#4 FOO 45 foo
# 3 6 ID#5 FOO 21 foo
assert set(df.columns) == set(['study', 'study_label', 'domain_code', 'count', 'domain_label', 'subjects'])
pd.util.testing.assert_series_equal(df['count'],
pd.Series(data=[23, 23, 23, 21], name='count'))
pd.util.testing.assert_series_equal(df['domain_label'],
pd.Series(data=["Age", "bar", "foo", "foo"], name='domain_label'))
@pytest.mark.django_db
def test_pivot_counts_df(test_df):
df = pivot_counts_df(test_df)
# domain_code AGECAT BAR FOO
# id study study_label count
# 7 8 ID#7 21 NaN NaN 8.0
# 8 7 ID#6 22 7.0 NaN 8.0
# 9 7 ID#6 23 7.0 9.0 8.0
assert len(df) == 3
pd.util.testing.assert_index_equal(df.columns,
pd.Index(['AGECAT', 'BAR', 'FOO'], dtype='object', name='domain_code'))
@pytest.mark.django_db
def test_get_variable_counts(pivot_df):
# implementation detail to avoid repeating query
variables = Variable.objects.all()
var_lookup = groupby('id', variables.values('id', 'label', 'code'))
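    # toolz.groupby('id', ...) returns {variable id: [value dicts]}, so labels can be looked up below without extra queries.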
df = get_variable_counts(pivot_df, var_lookup, "FOO")
# study study_label FOO id count var_code var_label
# 0 9 ID#8 11.0 23 45 11.0 var
# 1 10 ID#9 11.0 10 21 11.0 var
assert set(df.columns) == set(['study', 'study_label', 'FOO', 'id', 'count', 'var_code', 'var_label', 'subjects'])
    pd.util.testing.assert_series_equal(df.var_label,
                                        pd.Series(data=["var", "var"], name='var_label'))
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
        # xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = date_range("2015-01-01", periods=2, tz="utc")
ser = Series(range(2), index=index, dtype="int64")
# list-like indexing
for sel in (index, list(index)):
# getitem
tm.assert_series_equal(ser[sel], ser)
# setitem
result = ser.copy()
result[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
tm.assert_series_equal(ser.loc[sel], ser)
# .loc setitem
result = ser.copy()
result.loc[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# single element indexing
# getitem
assert ser[index[1]] == 1
# setitem
result = ser.copy()
result[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
assert ser.loc[index[1]] == 1
# .loc setitem
result = ser.copy()
result.loc[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
def test_partial_setting_with_datetimelike_dtype(self):
# GH9478
# a datetimeindex alignment issue with partial setting
df = DataFrame(
np.arange(6.0).reshape(3, 2),
columns=list("AB"),
index=date_range("1/1/2000", periods=3, freq="1H"),
)
expected = df.copy()
expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT]
mask = df.A < 1
df.loc[mask, "C"] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
def test_loc_setitem_datetime(self):
# GH 9516
dt1 = Timestamp("20130101 09:00:00")
dt2 = Timestamp("20130101 10:00:00")
for conv in [
lambda x: x,
lambda x: x.to_datetime64(),
lambda x: x.to_pydatetime(),
lambda x: np.datetime64(x),
]:
df = DataFrame()
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2])
tm.assert_frame_equal(df, expected)
def test_series_partial_set_datetime(self):
# GH 11497
idx = date_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
result = ser.loc[[Timestamp("2011-01-01"), Timestamp("2011-01-02")]]
exp = Series([0.1, 0.2], index=idx, name="s")
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-01"),
]
exp = Series(
[0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name="idx"), name="s"
)
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [
Timestamp("2011-01-03"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
]
with pytest.raises(KeyError, match="with any missing labels"):
ser.loc[keys]
def test_series_partial_set_period(self):
# GH 11497
idx = pd.period_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
result = ser.loc[
[pd.Period("2011-01-01", freq="D"), pd.Period("2011-01-02", freq="D")]
]
exp = Series([0.1, 0.2], index=idx, name="s")
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
pd.Period("2011-01-02", freq="D"),
pd.Period("2011-01-02", freq="D"),
pd.Period("2011-01-01", freq="D"),
]
exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name="idx"), name="s")
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [
pd.Period("2011-01-03", freq="D"),
pd.Period("2011-01-02", freq="D"),
pd.Period("2011-01-03", freq="D"),
]
with pytest.raises(KeyError, match="with any missing labels"):
ser.loc[keys]
def test_nanosecond_getitem_setitem_with_tz(self):
# GH 11679
data = ["2016-06-28 08:30:00.123456789"]
index = pd.DatetimeIndex(data, dtype="datetime64[ns, America/Chicago]")
df = DataFrame({"a": [10]}, index=index)
result = df.loc[df.index[0]]
expected = Series(10, index=["a"], name=df.index[0])
tm.assert_series_equal(result, expected)
result = df.copy()
result.loc[df.index[0], "a"] = -1
expected = DataFrame(-1, index=index, columns=["a"])
tm.assert_frame_equal(result, expected)
def test_loc_getitem_across_dst(self):
# GH 21846
idx = pd.date_range(
"2017-10-29 01:30:00", tz="Europe/Berlin", periods=5, freq="30 min"
)
series2 = pd.Series([0, 1, 2, 3, 4], index=idx)
t_1 = pd.Timestamp(
"2017-10-29 02:30:00+02:00", tz="Europe/Berlin", freq="30min"
)
t_2 = pd.Timestamp(
"2017-10-29 02:00:00+01:00", tz="Europe/Berlin", freq="30min"
)
result = series2.loc[t_1:t_2]
expected = pd.Series([2, 3], index=idx[2:4])
tm.assert_series_equal(result, expected)
result = series2[t_1]
expected = 2
assert result == expected
def test_loc_incremental_setitem_with_dst(self):
# GH 20724
base = datetime(2015, 11, 1, tzinfo=tz.gettz("US/Pacific"))
idxs = [base + timedelta(seconds=i * 900) for i in range(16)]
result = pd.Series([0], index=[idxs[0]])
for ts in idxs:
result.loc[ts] = 1
expected = pd.Series(1, index=idxs)
tm.assert_series_equal(result, expected)
def test_loc_setitem_with_existing_dst(self):
# GH 18308
start = pd.Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid")
end = pd.Timestamp("2017-10-29 03:00:00+0100", tz="Europe/Madrid")
ts = pd.Timestamp("2016-10-10 03:00:00", tz="Europe/Madrid")
idx = pd.date_range(start, end, closed="left", freq="H")
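        # the left-closed hourly range spans the Madrid DST fall-back, so local 02:00 appears twice (once per UTC offset)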
result = pd.DataFrame(index=idx, columns=["value"])
result.loc[ts, "value"] = 12
expected = pd.DataFrame(
[np.nan] * len(idx) + [12],
index=idx.append(pd.DatetimeIndex([ts])),
columns=["value"],
dtype=object,
)
tm.assert_frame_equal(result, expected)
def test_loc_str_slicing(self):
ix = pd.period_range(start="2017-01-01", end="2018-01-01", freq="M")
ser = ix.to_series()
result = ser.loc[:"2017-12"]
expected = ser.iloc[:-1]
tm.assert_series_equal(result, expected)
def test_loc_label_slicing(self):
ix = pd.period_range(start="2017-01-01", end="2018-01-01", freq="M")
ser = ix.to_series()
result = ser.loc[: ix[-2]]
expected = ser.iloc[:-1]
        tm.assert_series_equal(result, expected)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so nothing is defined and the sentinel
        # values should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
        subject_list = [
            'dave@google.com',
            'tdhock5@gmail.com',
            'maudelaperriere@gmail.com',
            'rob@gmail.com some text steve@gmail.com',
            'a@b.com some text c@d.com and e@f.com',
            np.nan,
            "",
        ]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # an Index should return the same result as the default index without a name,
        # so index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
        # same as the above tests, but s has a MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ Em 3
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
        s = Series(['Wes McKinney', 'Travis Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = | Series(['foo', 'bar', NA, 'baz']) | pandas.Series |
import datetime as dt
import os
from datetime import datetime
from typing import List, Tuple
import numpy as np
import pandas as pd
from domain.demand_prediction_mode import DemandPredictionMode
# random.seed(1234)
np.random.seed(1234)
# torch.manual_seed(1234)
# torch.cuda.manual_seed_all(1234)
# torch.backends.cudnn.deterministic = True
def timestamp_datetime(value) -> datetime:
d = datetime.fromtimestamp(value)
t = dt.datetime(d.year, d.month, d.day, d.hour, d.minute, 0)
return t
def string_datetime(value):
return dt.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
def string_pd_timestamp(value):
d = string_datetime(value)
t = pd.Timestamp(d.year, d.month, d.day, d.hour, d.minute)
return t
def read_map(input_file_path: str) -> np.ndarray:
reader = pd.read_csv(input_file_path, chunksize=1000)
map_list = []
for chunk in reader:
map_list.append(chunk)
map_df: pd.DataFrame = pd.concat(map_list)
map_df = map_df.drop(["Unnamed: 0"], axis=1)
map_values = map_df.values
map_values = map_values.astype("int64")
return map_values
def read_cost_map(input_file_path: str) -> np.ndarray:
reader = pd.read_csv(input_file_path, header=None, chunksize=1000)
map_list = []
for chunk in reader:
map_list.append(chunk)
map_df: pd.DataFrame = pd.concat(map_list)
return map_df.values
def read_path(input_file_path) -> np.ndarray:
reader = pd.read_csv(input_file_path, chunksize=1000)
path_list = []
for chunk in reader:
path_list.append(chunk)
path_df = pd.concat(path_list)
path_df = path_df.drop(["Unnamed: 0"], axis=1)
path_values = path_df.values
path_values = path_values.astype("int64")
return path_values
def read_node(input_file_path) -> pd.DataFrame:
reader = pd.read_csv(input_file_path, chunksize=1000)
node_list = []
for chunk in reader:
node_list.append(chunk)
node_df = | pd.concat(node_list) | pandas.concat |
"""
execution environment: cdips, + pipe-trex .pth file in
/home/lbouma/miniconda3/envs/cdips/lib/python3.7/site-packages
python -u paper_plot_all_figures.py &> logs/paper_plot_all.log &
"""
from glob import glob
import datetime, os, pickle, shutil, subprocess
import numpy as np, pandas as pd
import matplotlib.pyplot as plt
from numpy import array as nparr
from datetime import datetime
from astropy.io import fits
from astropy.io.votable import from_table, writeto, parse
from astropy.coordinates import SkyCoord
from astropy import units as u
from astrobase import lcmath
from astrobase.lcmath import phase_magseries
from lcstatistics import compute_lc_statistics_fits
import lcstatistics as lcs
from cdips.utils import tess_noise_model as tnm
from cdips.utils import collect_cdips_lightcurves as ccl
from cdips.plotting import plot_star_catalog as psc
from cdips.plotting import plot_catalog_to_gaia_match_statistics as xms
from cdips.plotting import plot_wcsqa as wcsqa
from cdips.plotting import plot_quilt_PCs as pqp
from cdips.plotting import plot_quilt_s6_s7 as pqps
from cdips.plotting import savefig
import imageutils as iu
import matplotlib.colors as colors
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.patches as patches
import matplotlib.patheffects as path_effects
from skim_cream import plot_initial_period_finding_results
from collections import Counter
OUTDIR = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/paper_V_figures/'
if not os.path.exists(OUTDIR):
os.mkdir(OUTDIR)
CLUSTERDATADIR = '/home/lbouma/proj/cdips/data/cluster_data'
LCDIR = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/'
OC_MG_CAT_ver=0.5
def main():
# fig N: T magnitude CDF for all CDIPS target stars.
plot_target_star_cumulative_counts(OC_MG_CAT_ver=OC_MG_CAT_ver, overwrite=1)
# fig N: HRD for CDIPS TARGET (not LC) stars.
sectors = None
plot_hrd_scat(sectors, overwrite=1, closest_subset=1)
plot_hrd_scat(sectors, overwrite=1, close_subset=1)
plot_hrd_scat(sectors, overwrite=1)
# fig N: histogram of CDIPS target star age.
plot_target_star_hist_logt(OC_MG_CAT_ver=OC_MG_CAT_ver, overwrite=1)
assert 0
# fig N: pmRA and pmDEC scatter for CDIPS LC stars.
plot_pm_scat(sectors, overwrite=1, close_subset=1)
plot_pm_scat(sectors, overwrite=1, close_subset=0)
# fig N: histogram of ages of LC stars
plot_hist_logt(sectors, overwrite=1)
# fig N: histogram (or CDF) of T magnitude for LC stars
plot_cdf_T_mag(sectors, overwrite=1)
sectors = [6,7,8,9,10,11,12,13]
# fig N: RMS vs catalog T mag for LC stars, with TFA LCs
plot_rms_vs_mag(sectors, overwrite=1)
# fig N: positions of field and cluster LC stars (currently all cams)
plot_cluster_and_field_star_scatter(sectors=sectors, overwrite=1,
galacticcoords=True)
plot_cluster_and_field_star_scatter(sectors=sectors, overwrite=1)
# plot_singleccd_rms_vs_mag(sectors, overwrite=0)
# fig N: average autocorrelation fn of LCs
plot_avg_acf(sectors, size=10000, overwrite=1, cleanprevacf=False)
# fig N: stages of image processing.
plot_stages_of_image_processing(niceimage=1, overwrite=1)
plot_stages_of_image_processing(niceimage=0, overwrite=1)
# fig N: catalog_to_gaia_match_statistics for CDIPS target stars
plot_catalog_to_gaia_match_statistics(overwrite=1)
# fig N: quilt of interesting light curves, phase-folded
pqps.plot_quilt_s6_s7(overwrite=1)
# fig N: 3x2 quilt of phased PC
pqp.plot_quilt_PCs(overwrite=1, paper_aspect_ratio=0)
pqp.plot_quilt_PCs(overwrite=1, paper_aspect_ratio=1)
# timeseries figures
for sector in range(6,8):
for cam in range(1,5):
for ccd in range(1,5):
try:
plot_detrended_light_curves(
sector=sector, cam=cam, ccd=ccd, overwrite=0, seed=42
)
except Exception as e:
print('{}-{}-{} failed, because {}'.
format(sector,cam,ccd,repr(e)))
pass
plot_external_parameters_vs_time(
sector=6, cam=1, ccd=1, overwrite=1, seed=43)
plot_raw_light_curve_systematics(
sector=6, cam=1, ccd=2, overwrite=1, seed=43)
plot_raw_light_curve_systematics(
sector=7, cam=2, ccd=4, overwrite=1, seed=42)
# fig N: wcs quality verification for one photometric reference
plot_wcs_verification(overwrite=1)
# fig N: target star provenance
plot_target_star_reference_pie_chart(OC_MG_CAT_ver=OC_MG_CAT_ver, overwrite=1)
# fig N: tls_sde_vs_period_scatter
plot_tls_sde_vs_period_scatter(sectors, overwrite=1)
# fig N: LS period vs color evolution in time
plot_LS_period_vs_color_and_age(sectors, overwrite=1,
OC_MG_CAT_ver=OC_MG_CAT_ver)
# fig N: histogram (or CDF) of TICCONT. unfortunately this is only
# calculated for CTL stars, so by definition it has limited use
plot_cdf_cont(sectors, overwrite=0)
def get_Tmag(fitspath):
with fits.open(fitspath) as hdulist:
mag = hdulist[0].header['TESSMAG']
return mag
def get_mag(fitspath, ap='IRM2'):
with fits.open(fitspath) as hdulist:
mag = hdulist[1].data[ap]
return mag
def plot_external_parameters_vs_time(sector=6, cam=1, ccd=2, overwrite=1,
seed=42):
outpath = os.path.join(
OUTDIR,
'external_parameters_vs_time_sec{}cam{}ccd{}.png'.
format(sector, cam, ccd)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
#
# data dir with what lcs exist, and what their T mags are.
#
dfpath = os.path.join(
OUTDIR,
'detrended_light_curves_sec{}cam{}ccd{}.csv'.
format(sector, cam, ccd)
)
if not os.path.exists(dfpath):
lcdir = (
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{}/cam{}_ccd{}'.
format(sector, cam, ccd)
)
lcpaths = glob(os.path.join(lcdir, '*_llc.fits'))
Tmags = np.array([get_Tmag(l) for l in lcpaths])
df = pd.DataFrame({'lcpaths':lcpaths,'Tmags':Tmags})
df.to_csv(dfpath, index=False, sep=',')
else:
df = pd.read_csv(dfpath, sep=',')
sel = (df['Tmags'] > 13) & (df['Tmags'] < 14)
lcpaths = nparr(df['lcpaths'][sel])
np.random.seed(seed)
spath = np.random.choice(lcpaths, size=1, replace=False)[0]
#
# define some keys. open the chosen lc. and get the times of momentum dumps.
#
lc = fits.open(spath)[1].data
magtype = 'IRM2'
keys = [magtype, 'XIC', 'YIC', 'FSV', 'FDV', 'FKV', 'CCDTEMP', 'BGV']
labels = [magtype, 'x', 'y', 's', 'd', 'k', 'T [$^\circ$C]', 'bkgd [ADU]']
time = lc['TMID_BJD']
baddir = (
'/nfs/phtess2/ar0/TESS/FFI/RED_IMGSUB/FULL/'+
's{}/'.format(str(sector).zfill(4))+
'RED_{}-{}-15??_ISP/badframes'.format(cam, ccd)
)
badframes = glob(os.path.join(baddir, '*.fits'))
mom_dump_times = []
qualitys = []
for badframe in badframes:
quality = iu.get_header_keyword(badframe, 'DQUALITY')
        if not quality > 0:
continue
tstart = iu.get_header_keyword(badframe, 'TSTART')
telapse = iu.get_header_keyword(badframe, 'TELAPSE')
bjdrefi = iu.get_header_keyword(badframe, 'BJDREFI')
tmid = bjdrefi + tstart + telapse / 2
mom_dump_times.append(tmid)
qualitys.append(quality)
#
# now make the plot
#
plt.close('all')
fig,axs = plt.subplots(nrows=len(keys), ncols=1,
figsize=(4, 1.2*len(keys)), sharex=True)
axs = axs.flatten()
for ax, key, label in zip(axs, keys, labels):
xval = time
yval = lc[key]
if label in ['x','y']:
yoffset = int(np.mean(yval))
yval -= yoffset
label += '- {:d} [px]'.format(yoffset)
elif label in [magtype]:
yoffset = np.round(np.median(yval), decimals=1)
yval -= yoffset
yval *= 1e3
label += '- {:.1f} [mmag]'.format(yoffset)
ax.scatter(xval, yval, rasterized=True, alpha=0.8, zorder=3, c='k',
lw=0, s=3)
ax.set_ylabel(label, fontsize='small')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
ax.xaxis.set_tick_params(labelsize='small')
ax.yaxis.set_tick_params(labelsize='small')
if label in [magtype]:
ylim = ax.get_ylim()
ax.set_ylim((max(ylim), min(ylim)))
ylim = ax.get_ylim()
ax.vlines(mom_dump_times, min(ylim), max(ylim), color='orangered',
linestyle='--', zorder=0, lw=1, alpha=0.3)
ax.set_ylim((min(ylim), max(ylim)))
ax.set_xlabel('Time $\mathrm{{BJD}}_{{\mathrm{{TDB}}}}$ [days]',
fontsize='small')
fig.tight_layout(h_pad=-0.5, pad=0.2)
savefig(fig, outpath)
def plot_raw_light_curve_systematics(sector=None, cam=None, ccd=None,
overwrite=False, N_to_plot=20, seed=42):
"""
get a random sample of IRM2 light curves from the same camera & ccd. plot
them all together to show how we are systematics dominated.
"""
outpath = os.path.join(
OUTDIR,
'raw_light_curve_systematics_sec{}cam{}ccd{}.png'.
format(sector, cam, ccd)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
dfpath = os.path.join(
OUTDIR,
'raw_light_curve_systematics_sec{}cam{}ccd{}.csv'.
format(sector, cam, ccd)
)
if not os.path.exists(dfpath):
lcdir = (
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{}/cam{}_ccd{}'.
format(sector, cam, ccd)
)
lcpaths = glob(os.path.join(lcdir, '*_llc.fits'))
Tmags = np.array([get_Tmag(l) for l in lcpaths])
df = pd.DataFrame({'lcpaths':lcpaths,'Tmags':Tmags})
df.to_csv(dfpath, index=False, sep=',')
else:
df = pd.read_csv(dfpath, sep=',')
sel = (df['Tmags'] > 13) & (df['Tmags'] < 14)
lcpaths = nparr(df['lcpaths'][sel])
Tmags = nparr(df['Tmags'][sel])
np.random.seed(seed)
spaths = np.random.choice(lcpaths, size=2*N_to_plot, replace=False)
# shape: (N_to_plot x N_observations)
rawmags = nparr([get_mag(s, ap='IRM2') for s in spaths])
pcamags = nparr([get_mag(s, ap='PCA2') for s in spaths])
tfmags = nparr([get_mag(s, ap='TFA2') for s in spaths])
time = fits.open(spaths[0])[1].data['TMID_BJD']
assert time.shape[0] == rawmags.shape[1]
#
# make the stacked plot of raw mags.
#
f, ax = plt.subplots(figsize=(4,8))
colors = plt.cm.tab20b( list(range(N_to_plot)) )
ind = 0
for i in range(N_to_plot):
ind += 1
mag = rawmags[ind,:]
if np.all(pd.isnull(mag)):
continue
mag -= np.nanmean(mag)
offset = i*0.15
expected_norbits = 2
orbitgap = 0.5
norbits, groups = lcmath.find_lc_timegroups(time, mingap=orbitgap)
if norbits != expected_norbits:
errmsg = 'got {} orbits, expected {}. groups are {}'.format(
norbits, expected_norbits, repr(groups))
            raise AssertionError(errmsg)
for group in groups:
tg_time = time[group]
tg_mag = mag[group]
ax.plot(tg_time, tg_mag+offset, c=colors[i], lw=0.5,
rasterized=True)
ax.set_xlabel('Time $\mathrm{{BJD}}_{{\mathrm{{TDB}}}}$ [days]')
ax.set_ylabel('Magnitude [arbitrary offset]')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
f.tight_layout(pad=0.2)
savefig(f, outpath)
def plot_detrended_light_curves(sector=None, cam=None, ccd=None,
overwrite=False, N_to_plot=20, seed=42):
"""
use the sample of light curves from plot_raw_light_curve_systematics to
show how super-awesome the detrending is.
"""
outpath = os.path.join(
OUTDIR,
'detrended_light_curves_sec{}cam{}ccd{}.png'.
format(sector, cam, ccd)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
dfpath = os.path.join(
OUTDIR,
'detrended_light_curves_sec{}cam{}ccd{}.csv'.
format(sector, cam, ccd)
)
if not os.path.exists(dfpath):
lcdir = (
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{}/cam{}_ccd{}'.
format(sector, cam, ccd)
)
lcpaths = glob(os.path.join(lcdir, '*_llc.fits'))
Tmags = np.array([get_Tmag(l) for l in lcpaths])
df = pd.DataFrame({'lcpaths':lcpaths,'Tmags':Tmags})
df.to_csv(dfpath, index=False, sep=',')
else:
df = | pd.read_csv(dfpath, sep=',') | pandas.read_csv |
from datetime import datetime
import pytest
from pandas import (
DatetimeIndex,
offsets,
to_datetime,
)
import pandas._testing as tm
from pandas.tseries.holiday import (
AbstractHolidayCalendar,
Holiday,
Timestamp,
USFederalHolidayCalendar,
USLaborDay,
USThanksgivingDay,
get_calendar,
)
@pytest.mark.parametrize(
"transform", [lambda x: x, lambda x: x.strftime("%Y-%m-%d"), lambda x: Timestamp(x)]
)
def test_calendar(transform):
start_date = datetime(2012, 1, 1)
end_date = datetime(2012, 12, 31)
calendar = USFederalHolidayCalendar()
holidays = calendar.holidays(transform(start_date), transform(end_date))
expected = [
datetime(2012, 1, 2),
datetime(2012, 1, 16),
datetime(2012, 2, 20),
datetime(2012, 5, 28),
datetime(2012, 7, 4),
datetime(2012, 9, 3),
datetime(2012, 10, 8),
datetime(2012, 11, 12),
datetime(2012, 11, 22),
datetime(2012, 12, 25),
]
assert list(holidays.to_pydatetime()) == expected
def test_calendar_caching():
# see gh-9552.
class TestCalendar(AbstractHolidayCalendar):
def __init__(self, name=None, rules=None):
super().__init__(name=name, rules=rules)
jan1 = TestCalendar(rules=[Holiday("jan1", year=2015, month=1, day=1)])
jan2 = TestCalendar(rules=[Holiday("jan2", year=2015, month=1, day=2)])
# Getting holidays for Jan 1 should not alter results for Jan 2.
tm.assert_index_equal(jan1.holidays(), DatetimeIndex(["01-Jan-2015"]))
tm.assert_index_equal(jan2.holidays(), DatetimeIndex(["02-Jan-2015"]))
def test_calendar_observance_dates():
# see gh-11477
us_fed_cal = get_calendar("USFederalHolidayCalendar")
holidays0 = us_fed_cal.holidays(
datetime(2015, 7, 3), datetime(2015, 7, 3)
) # <-- same start and end dates
holidays1 = us_fed_cal.holidays(
datetime(2015, 7, 3), datetime(2015, 7, 6)
) # <-- different start and end dates
holidays2 = us_fed_cal.holidays(
datetime(2015, 7, 3), datetime(2015, 7, 3)
) # <-- same start and end dates
# These should all produce the same result.
#
# In addition, calling with different start and end
# dates should not alter the output if we call the
# function again with the same start and end date.
tm.assert_index_equal(holidays0, holidays1)
tm.assert_index_equal(holidays0, holidays2)
def test_rule_from_name():
us_fed_cal = | get_calendar("USFederalHolidayCalendar") | pandas.tseries.holiday.get_calendar |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pandas
from pandas.core.common import is_bool_indexer
from pandas.core.indexing import check_bool_indexer
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from pandas.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_pandas, wrap_udf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _get_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
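# Illustrative note (not part of the original module): these factories are later
# bound as class properties, e.g.
#   index = property(_get_axis(0), _set_axis(0))
# so reading `qc.index` returns `qc._modin_frame.index` and assigning to it
# writes straight through to the underlying frame.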
def _str_map(func_name):
def str_op_builder(df, *args, **kwargs):
str_s = df.squeeze(axis=1).str
return getattr(pandas.Series.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
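# Usage sketch (assumed example, not from the original source): the builder is
# applied to a single-column partition and mirrors the corresponding
# Series.str method, e.g.
#   upper_builder = _str_map("upper")
#   upper_builder(pandas.DataFrame({"a": ["x", "y"]}))  # -> one-column frame ["X", "Y"]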
def _dt_prop_map(property_name):
"""
    Create a function that accesses a property of the `dt` accessor of the series.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies non-callable properties of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze(axis=1).dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
elif isinstance(prop_val, pandas.DataFrame):
return prop_val
else:
return pandas.DataFrame([prop_val])
return dt_op_builder
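# Usage sketch (assumed example): a builder made from a non-callable `dt`
# property is applied per partition, e.g.
#   hour_builder = _dt_prop_map("hour")
#   hour_builder(pandas.DataFrame({"t": pandas.to_datetime(["2020-01-01 05:30"])}))
# returns a one-column frame holding the hour values (here, 5).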
def _dt_func_map(func_name):
"""
    Create a function that calls a method of the `dt` accessor of the series.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies callable methods of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
dt_s = df.squeeze(axis=1).dt
return pandas.DataFrame(
getattr(pandas.Series.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
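# Usage sketch (assumed example): unlike _dt_prop_map, this wraps callable `dt`
# methods, e.g.
#   norm_builder = _dt_func_map("normalize")
#   norm_builder(pandas.DataFrame({"t": pandas.to_datetime(["2020-01-01 05:30"])}))
# returns a frame of the same timestamps floored to midnight.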
def copy_df_for_func(func):
"""
Create a function that copies the dataframe, likely because `func` is inplace.
Parameters
----------
func : callable
The function, usually updates a dataframe inplace.
Returns
-------
callable
A callable function to be applied in the partitions
"""
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
return caller
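# Usage sketch (assumed example): wrapping an inplace pandas method yields a
# pure function suitable for the partition operators, e.g.
#   update_func = copy_df_for_func(pandas.DataFrame.update)
#   updated = update_func(df, other_df)  # `df` itself is left unmodified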
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
The operation to apply, must be compatible pandas DataFrame call
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
PandasQueryCompiler
The result of the `pandas_op`, converted back to PandasQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to pandas.
"""
ErrorMessage.default_to_pandas(str(pandas_op))
args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
kwargs = {
            k: v.to_pandas() if isinstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if isinstance(result, pandas.DataFrame):
return self.from_pandas(result, type(self._modin_frame))
else:
return result
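    # Usage sketch (illustrative, not original code): any operation lacking a
    # distributed implementation can be routed through this fallback, e.g.
    #   qc.default_to_pandas(pandas.DataFrame.mode, axis=0)
    # collects the frame, runs the pandas call, and re-wraps the result.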
def to_pandas(self):
return self._modin_frame.to_pandas()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_get_axis(0), _set_axis(0))
columns = property(_get_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(self._modin_frame.copy())
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
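    # Illustrative example of the intended semantics (not original code): a
    # row-wise concat of compilers with different columns behaves like
    #   pandas.concat([left_df, right_df], axis=0, join="outer")
    # with np.nan filling the columns that only one side provides.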
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
if not isinstance(other, list):
other = [other]
assert all(
isinstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
if sort is None:
sort = False
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reset_index(drop=True)
else:
result.columns = pandas.RangeIndex(len(result.columns))
return result
return result
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin DataFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
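    # Example of the outer-join alignment described above (hedged sketch, not
    # original code):
    #   pandas.DataFrame({"x": [1, 2]}, index=["a", "b"]).add(
    #       pandas.DataFrame({"x": [10]}, index=["b"]))
    # aligns on the union index, so row "a" becomes NaN and row "b" becomes 12.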
add = BinaryFunction.register(pandas.DataFrame.add)
combine = BinaryFunction.register(pandas.DataFrame.combine)
combine_first = BinaryFunction.register(pandas.DataFrame.combine_first)
eq = BinaryFunction.register(pandas.DataFrame.eq)
floordiv = BinaryFunction.register(pandas.DataFrame.floordiv)
ge = BinaryFunction.register(pandas.DataFrame.ge)
gt = BinaryFunction.register(pandas.DataFrame.gt)
le = BinaryFunction.register(pandas.DataFrame.le)
lt = BinaryFunction.register(pandas.DataFrame.lt)
mod = BinaryFunction.register(pandas.DataFrame.mod)
mul = BinaryFunction.register(pandas.DataFrame.mul)
ne = BinaryFunction.register(pandas.DataFrame.ne)
pow = BinaryFunction.register(pandas.DataFrame.pow)
rfloordiv = BinaryFunction.register(pandas.DataFrame.rfloordiv)
rmod = BinaryFunction.register(pandas.DataFrame.rmod)
rpow = BinaryFunction.register(pandas.DataFrame.rpow)
rsub = BinaryFunction.register(pandas.DataFrame.rsub)
rtruediv = BinaryFunction.register(pandas.DataFrame.rtruediv)
sub = BinaryFunction.register(pandas.DataFrame.sub)
truediv = BinaryFunction.register(pandas.DataFrame.truediv)
__and__ = BinaryFunction.register(pandas.DataFrame.__and__)
__or__ = BinaryFunction.register(pandas.DataFrame.__or__)
__rand__ = BinaryFunction.register(pandas.DataFrame.__rand__)
__ror__ = BinaryFunction.register(pandas.DataFrame.__ror__)
__rxor__ = BinaryFunction.register(pandas.DataFrame.__rxor__)
__xor__ = BinaryFunction.register(pandas.DataFrame.__xor__)
df_update = BinaryFunction.register(
copy_df_for_func(pandas.DataFrame.update), join_type="left"
)
series_update = BinaryFunction.register(
copy_df_for_func(
lambda x, y: pandas.Series.update(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
            cond: Condition on which to evaluate values.
            other: Replacement values used where `cond` is False (query compiler or scalar).
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Series of scalars to be applied based on the condition
# dataframe.
else:
def where_builder_series(df, cond):
return df.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_series, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : PandasQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
how = kwargs.get("how", "inner")
on = kwargs.get("on", None)
left_on = kwargs.get("left_on", None)
right_on = kwargs.get("right_on", None)
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
sort = kwargs.get("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_pandas()
kwargs["sort"] = False
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
is_reset_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reset_index = (
False
if any(o in new_self.index.names for o in left_on)
and any(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(list(left_on) + list(right_on))
if is_reset_index
else new_self.sort_index(axis=0, level=list(left_on) + list(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reset_index = not any(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reset_index
else new_self.sort_index(axis=0, level=on)
)
return new_self.reset_index(drop=True) if is_reset_index else new_self
else:
return self.default_to_pandas(pandas.DataFrame.merge, right, **kwargs)
def join(self, right, **kwargs):
"""
Join columns of another DataFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right DataFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See pd.DataFrame.join for more info on kwargs.
"""
on = kwargs.get("on", None)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
if how in ["left", "inner"]:
right = right.to_pandas()
def map_func(left, right=right, kwargs=kwargs):
return pandas.DataFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_pandas(pandas.DataFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.reindex(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
level = kwargs.get("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_pandas(pandas.DataFrame.reset_index, **kwargs)
if not drop:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.copy()
new_self.index = pandas.RangeIndex(len(new_self.index))
return new_self
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
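# Conceptual sketch (plain pandas, illustration only): whatever bookkeeping is
# done at the block level, the user-visible result must match an eager
# transpose, e.g. pandas.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).T has
# shape (2, 3) with the old column labels as its index.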
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be called for QueryCompilers representing a Series object,
i.e. self.is_series_like() should be True.
Returns
-------
PandasQueryCompiler
Transposed new QueryCompiler or self.
"""
if len(self.columns) != 1 or (
len(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_series_like(self):
"""Return True if QueryCompiler has a single column or row"""
return len(self.columns) == 1 or len(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda df: df.is_monotonic_increasing,
"decreasing": lambda df: df.is_monotonic_decreasing,
}
monotonic_fn = funcs.get(func_type, funcs["increasing"])
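# Each partition reports (is_monotonic, first value, last value); the reduce
# step requires every partition to be monotonic and the sequence of partition
# edge values to be monotonic as well.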
def is_monotonic_map(df):
df = df.squeeze(axis=1)
return [monotonic_fn(df), df.iloc[0], df.iloc[len(df) - 1]]
def is_monotonic_reduce(df):
df = df.squeeze(axis=1)
common_case = df[0].all()
left_edges = df[1]
right_edges = df[2]
edges_list = []
for i in range(len(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(pandas.Series(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_map, is_monotonic_reduce, axis=0
)(self)
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(pandas.DataFrame.count, pandas.DataFrame.sum)
max = MapReduceFunction.register(pandas.DataFrame.max, pandas.DataFrame.max)
min = MapReduceFunction.register(pandas.DataFrame.min, pandas.DataFrame.min)
sum = MapReduceFunction.register(pandas.DataFrame.sum, pandas.DataFrame.sum)
prod = MapReduceFunction.register(pandas.DataFrame.prod, pandas.DataFrame.prod)
any = MapReduceFunction.register(pandas.DataFrame.any, pandas.DataFrame.any)
all = MapReduceFunction.register(pandas.DataFrame.all, pandas.DataFrame.all)
memory_usage = MapReduceFunction.register(
pandas.DataFrame.memory_usage,
lambda x, *args, **kwargs: pandas.DataFrame.sum(x),
axis=0,
)
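# The mean registration below maps each partition to (sum, count) pairs and
# reduces by dividing the total sum by the total count, so partial means are
# never averaged directly (which would weight unequal partitions incorrectly).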
mean = MapReduceFunction.register(
lambda df, **kwargs: df.apply(
lambda x: (x.sum(skipna=kwargs.get("skipna", True)), x.count()),
axis=kwargs.get("axis", 0),
result_type="reduce",
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
lambda df, **kwargs: df.apply(
lambda x: x.apply(lambda d: d[0]).sum(skipna=kwargs.get("skipna", True))
/ x.apply(lambda d: d[1]).sum(skipna=kwargs.get("skipna", True)),
axis=kwargs.get("axis", 0),
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
)
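# For reference on value_counts semantics (plain pandas, illustration only):
#   pandas.Series(["a", "b", "a"]).value_counts()   # -> a: 2, b: 1
# The map/reduce implementation below sums per-partition counts grouped by
# value to reproduce this on partitioned data.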
def value_counts(self, **kwargs):
"""
Return a QueryCompiler of Series containing counts of unique values.
Returns
-------
PandasQueryCompiler
"""
if kwargs.get("bins", None) is not None:
new_modin_frame = self._modin_frame._apply_full_axis(
0, lambda df: df.squeeze(axis=1).value_counts(**kwargs)
)
return self.__constructor__(new_modin_frame)
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs)
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except (ValueError):
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series(
[df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan]
)
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices need to be sorted for runs of equal values.
ascending : boolean
Sort in ascending (True) or descending (False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(
result.index[j : i + 1], reverse=not ascending
):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
return sort_index_for_equal_values(result, ascending)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# END MapReduce operations
# Reduction operations
idxmax = ReductionFunction.register(pandas.DataFrame.idxmax)
idxmin = ReductionFunction.register(pandas.DataFrame.idxmin)
median = ReductionFunction.register(pandas.DataFrame.median)
nunique = ReductionFunction.register(pandas.DataFrame.nunique)
skew = ReductionFunction.register(pandas.DataFrame.skew)
kurt = ReductionFunction.register(pandas.DataFrame.kurt)
sem = ReductionFunction.register(pandas.DataFrame.sem)
std = ReductionFunction.register(pandas.DataFrame.std)
var = ReductionFunction.register(pandas.DataFrame.var)
sum_min_count = ReductionFunction.register(pandas.DataFrame.sum)
prod_min_count = ReductionFunction.register(pandas.DataFrame.prod)
quantile_for_single_value = ReductionFunction.register(pandas.DataFrame.quantile)
mad = ReductionFunction.register(pandas.DataFrame.mad)
to_datetime = ReductionFunction.register(
lambda df, *args, **kwargs: pandas.to_datetime(
df.squeeze(axis=1), *args, **kwargs
),
axis=1,
)
# END Reduction operations
def _resample_func(
self, resample_args, func_name, new_columns=None, df_op=None, *args, **kwargs
):
def map_func(df, resample_args=resample_args):
if df_op is not None:
df = df_op(df)
resampled_val = df.resample(*resample_args)
op = getattr(pandas.core.resample.Resampler, func_name)
if callable(op):
try:
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
val = op(resampled_val, *args, **kwargs)
except (ValueError):
resampled_val = df.copy().resample(*resample_args)
val = op(resampled_val, *args, **kwargs)
else:
val = getattr(resampled_val, func_name)
if isinstance(val, pandas.Series):
return val.to_frame()
else:
return val
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def resample_get_group(self, resample_args, name, obj):
return self._resample_func(resample_args, "get_group", name=name, obj=obj)
def resample_app_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"apply",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_app_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "apply", func=func, *args, **kwargs)
def resample_agg_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"aggregate",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_agg_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args, "aggregate", func=func, *args, **kwargs
)
def resample_transform(self, resample_args, arg, *args, **kwargs):
return self._resample_func(resample_args, "transform", arg=arg, *args, **kwargs)
def resample_pipe(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "pipe", func=func, *args, **kwargs)
def resample_ffill(self, resample_args, limit):
return self._resample_func(resample_args, "ffill", limit=limit)
def resample_backfill(self, resample_args, limit):
return self._resample_func(resample_args, "backfill", limit=limit)
def resample_bfill(self, resample_args, limit):
return self._resample_func(resample_args, "bfill", limit=limit)
def resample_pad(self, resample_args, limit):
return self._resample_func(resample_args, "pad", limit=limit)
def resample_nearest(self, resample_args, limit):
return self._resample_func(resample_args, "nearest", limit=limit)
def resample_fillna(self, resample_args, method, limit):
return self._resample_func(resample_args, "fillna", method=method, limit=limit)
def resample_asfreq(self, resample_args, fill_value):
return self._resample_func(resample_args, "asfreq", fill_value=fill_value)
def resample_interpolate(
self,
resample_args,
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
):
return self._resample_func(
resample_args,
"interpolate",
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def resample_count(self, resample_args):
return self._resample_func(resample_args, "count")
def resample_nunique(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "nunique", _method=_method, *args, **kwargs
)
def resample_first(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "first", _method=_method, *args, **kwargs
)
def resample_last(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "last", _method=_method, *args, **kwargs
)
def resample_max(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "max", _method=_method, *args, **kwargs
)
def resample_mean(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "mean", _method=_method, *args, **kwargs
)
def resample_median(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_min(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "min", _method=_method, *args, **kwargs
)
def resample_ohlc_ser(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args,
"ohlc",
df_op=lambda df: df.squeeze(axis=1),
_method=_method,
*args,
**kwargs,
)
def resample_ohlc_df(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "ohlc", _method=_method, *args, **kwargs
)
def resample_prod(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "prod", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_size(self, resample_args):
return self._resample_func(resample_args, "size", new_columns=["__reduced__"])
def resample_sem(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "sem", _method=_method, *args, **kwargs
)
def resample_std(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "std", ddof=ddof, *args, **kwargs)
def resample_sum(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "sum", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_var(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "var", ddof=ddof, *args, **kwargs)
def resample_quantile(self, resample_args, q, **kwargs):
return self._resample_func(resample_args, "quantile", q=q, **kwargs)
window_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
window_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
window_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
window_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_count = FoldFunction.register(
lambda df, rolling_args: pandas.DataFrame(df.rolling(*rolling_args).count())
)
rolling_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
rolling_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
rolling_median = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).median(**kwargs)
)
)
rolling_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
rolling_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_min = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).min(*args, **kwargs)
)
)
rolling_max = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).max(*args, **kwargs)
)
)
rolling_skew = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).skew(**kwargs)
)
)
rolling_kurt = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).kurt(**kwargs)
)
)
rolling_apply = FoldFunction.register(
lambda df, rolling_args, func, raw, engine, engine_kwargs, args, kwargs: pandas.DataFrame(
df.rolling(*rolling_args).apply(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
)
)
rolling_quantile = FoldFunction.register(
lambda df, rolling_args, quantile, interpolation, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
)
)
def rolling_corr(self, rolling_args, other, pairwise, *args, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
)(self)
def rolling_cov(self, rolling_args, other, pairwise, ddof, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
)(self)
def rolling_aggregate(self, rolling_args, func, *args, **kwargs):
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).aggregate(func=func, *args, **kwargs)
),
new_index=self.index,
)
return self.__constructor__(new_modin_frame)
def unstack(self, level, fill_value):
if not isinstance(self.index, pandas.MultiIndex) or (
isinstance(self.index, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
):
axis = 1
new_columns = ["__reduced__"]
need_reindex = True
else:
axis = 0
new_columns = None
need_reindex = False
def map_func(df):
return pandas.DataFrame(df.unstack(level=level, fill_value=fill_value))
def is_tree_like_or_1d(calc_index, valid_index):
if not isinstance(calc_index, pandas.MultiIndex):
return True
actual_len = 1
for lvl in calc_index.levels:
actual_len *= len(lvl)
return len(self.index) * len(self.columns) == actual_len * len(valid_index)
is_tree_like_or_1d_index = is_tree_like_or_1d(self.index, self.columns)
is_tree_like_or_1d_cols = is_tree_like_or_1d(self.columns, self.index)
is_all_multi_list = False
if (
isinstance(self.index, pandas.MultiIndex)
and isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
and is_tree_like_or_1d_index
and is_tree_like_or_1d_cols
):
is_all_multi_list = True
real_cols_bkp = self.columns
obj = self.copy()
obj.columns = np.arange(len(obj.columns))
else:
obj = self
new_modin_frame = obj._modin_frame._apply_full_axis(
axis, map_func, new_columns=new_columns
)
result = self.__constructor__(new_modin_frame)
def compute_index(index, columns, consider_index=True, consider_columns=True):
def get_unique_level_values(index):
return [
index.get_level_values(lvl).unique()
for lvl in np.arange(index.nlevels)
]
new_index = (
get_unique_level_values(index)
if consider_index
else index
if isinstance(index, list)
else [index]
)
new_columns = (
get_unique_level_values(columns) if consider_columns else [columns]
)
return pandas.MultiIndex.from_product([*new_columns, *new_index])
if is_all_multi_list and is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
result = result.sort_index()
index_level_values = [lvl for lvl in obj.index.levels]
result.index = compute_index(
index_level_values, real_cols_bkp, consider_index=False
)
return result
if need_reindex:
if is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
is_recompute_index = isinstance(self.index, pandas.MultiIndex)
is_recompute_columns = not is_recompute_index and isinstance(
self.columns, pandas.MultiIndex
)
new_index = compute_index(
self.index, self.columns, is_recompute_index, is_recompute_columns
)
elif is_tree_like_or_1d_index != is_tree_like_or_1d_cols:
if isinstance(self.columns, pandas.MultiIndex) or not isinstance(
self.index, pandas.MultiIndex
):
return result
else:
index = (
self.index.sortlevel()[0]
if is_tree_like_or_1d_index
and not is_tree_like_or_1d_cols
and isinstance(self.index, pandas.MultiIndex)
else self.index
)
index = pandas.MultiIndex.from_tuples(
list(index) * len(self.columns)
)
columns = self.columns.repeat(len(self.index))
index_levels = [
index.get_level_values(i) for i in range(index.nlevels)
]
new_index = pandas.MultiIndex.from_arrays(
[columns] + index_levels,
names=self.columns.names + self.index.names,
)
else:
return result
result = result.reindex(0, new_index)
return result
def stack(self, level, dropna):
if not isinstance(self.columns, pandas.MultiIndex) or (
isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.columns.nlevels
):
new_columns = ["__reduced__"]
else:
new_columns = None
new_modin_frame = self._modin_frame._apply_full_axis(
1,
lambda df: pandas.DataFrame(df.stack(level=level, dropna=dropna)),
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
# Map partitions operations
# These operations are operations that apply a function to every partition.
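# A plain-pandas sketch of why this is safe (toy data only): element-wise
# functions need no data from other partitions, e.g.
#   pandas.DataFrame({"x": [-1, 2, -3]}).abs()
# gives the same result computed on the whole frame or on row chunks that are
# concatenated afterwards.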
abs = MapFunction.register(pandas.DataFrame.abs, dtypes="copy")
applymap = MapFunction.register(pandas.DataFrame.applymap)
conj = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(np.conj(df))
)
invert = MapFunction.register(pandas.DataFrame.__invert__)
isin = MapFunction.register(pandas.DataFrame.isin, dtypes=np.bool)
isna = MapFunction.register(pandas.DataFrame.isna, dtypes=np.bool)
negative = MapFunction.register(pandas.DataFrame.__neg__)
notna = MapFunction.register(pandas.DataFrame.notna, dtypes=np.bool)
round = MapFunction.register(pandas.DataFrame.round)
replace = MapFunction.register(pandas.DataFrame.replace)
series_view = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(
df.squeeze(axis=1).view(*args, **kwargs)
)
)
to_numeric = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(
pandas.to_numeric(df.squeeze(axis=1), *args, **kwargs)
)
)
def repeat(self, repeats):
def map_fn(df):
return pandas.DataFrame(df.squeeze(axis=1).repeat(repeats))
if isinstance(repeats, int) or (is_list_like(repeats) and len(repeats) == 1):
return MapFunction.register(map_fn, validate_index=True)(self)
else:
return self.__constructor__(self._modin_frame._apply_full_axis(0, map_fn))
# END Map partitions operations
# String map partitions operations
str_capitalize = MapFunction.register(_str_map("capitalize"), dtypes="copy")
str_center = MapFunction.register(_str_map("center"), dtypes="copy")
str_contains = MapFunction.register(_str_map("contains"), dtypes=np.bool)
str_count = MapFunction.register(_str_map("count"), dtypes=int)
str_endswith = MapFunction.register(_str_map("endswith"), dtypes=np.bool)
str_find = MapFunction.register(_str_map("find"), dtypes="copy")
str_findall = MapFunction.register(_str_map("findall"), dtypes="copy")
str_get = MapFunction.register(_str_map("get"), dtypes="copy")
str_index = MapFunction.register(_str_map("index"), dtypes="copy")
str_isalnum = MapFunction.register(_str_map("isalnum"), dtypes=np.bool)
str_isalpha = MapFunction.register(_str_map("isalpha"), dtypes=np.bool)
str_isdecimal = MapFunction.register(_str_map("isdecimal"), dtypes=np.bool)
str_isdigit = MapFunction.register(_str_map("isdigit"), dtypes=np.bool)
str_islower = MapFunction.register(_str_map("islower"), dtypes=np.bool)
str_isnumeric = MapFunction.register(_str_map("isnumeric"), dtypes=np.bool)
str_isspace = MapFunction.register(_str_map("isspace"), dtypes=np.bool)
str_istitle = MapFunction.register(_str_map("istitle"), dtypes=np.bool)
str_isupper = MapFunction.register(_str_map("isupper"), dtypes=np.bool)
str_join = MapFunction.register(_str_map("join"), dtypes="copy")
str_len = MapFunction.register(_str_map("len"), dtypes=int)
str_ljust = MapFunction.register(_str_map("ljust"), dtypes="copy")
str_lower = MapFunction.register(_str_map("lower"), dtypes="copy")
str_lstrip = MapFunction.register(_str_map("lstrip"), dtypes="copy")
str_match = MapFunction.register(_str_map("match"), dtypes="copy")
str_normalize = MapFunction.register(_str_map("normalize"), dtypes="copy")
str_pad = MapFunction.register(_str_map("pad"), dtypes="copy")
str_partition = MapFunction.register(_str_map("partition"), dtypes="copy")
str_repeat = MapFunction.register(_str_map("repeat"), dtypes="copy")
str_replace = MapFunction.register(_str_map("replace"), dtypes="copy")
str_rfind = MapFunction.register(_str_map("rfind"), dtypes="copy")
str_rindex = MapFunction.register(_str_map("rindex"), dtypes="copy")
str_rjust = MapFunction.register(_str_map("rjust"), dtypes="copy")
str_rpartition = MapFunction.register(_str_map("rpartition"), dtypes="copy")
str_rsplit = MapFunction.register(_str_map("rsplit"), dtypes="copy")
str_rstrip = MapFunction.register(_str_map("rstrip"), dtypes="copy")
str_slice = MapFunction.register(_str_map("slice"), dtypes="copy")
str_slice_replace = MapFunction.register(_str_map("slice_replace"), dtypes="copy")
str_split = MapFunction.register(_str_map("split"), dtypes="copy")
str_startswith = MapFunction.register(_str_map("startswith"), dtypes=np.bool)
str_strip = MapFunction.register(_str_map("strip"), dtypes="copy")
str_swapcase = MapFunction.register(_str_map("swapcase"), dtypes="copy")
str_title = MapFunction.register(_str_map("title"), dtypes="copy")
str_translate = MapFunction.register(_str_map("translate"), dtypes="copy")
str_upper = MapFunction.register(_str_map("upper"), dtypes="copy")
str_wrap = MapFunction.register(_str_map("wrap"), dtypes="copy")
str_zfill = MapFunction.register(_str_map("zfill"), dtypes="copy")
# END String map partitions operations
def unique(self):
"""Return unique values of Series object.
Returns
-------
PandasQueryCompiler
QueryCompiler containing the unique values; the frontend converts it to a NumPy array.
"""
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda x: x.squeeze(axis=1).unique(),
new_columns=self.columns,
)
return self.__constructor__(new_modin_frame)
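# For reference on searchsorted semantics (plain pandas, illustration only):
#   pandas.Series([1, 3, 5, 7]).searchsorted(4)   # -> 2
# The implementation below computes local insertion points per row partition,
# shifts them by each partition's starting index, and reduces them to the
# globally correct positions.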
def searchsorted(self, **kwargs):
"""
Return a QueryCompiler with the indices at which value/values should be
inserted to maintain the order of the passed Series.
Returns
-------
PandasQueryCompiler
"""
def map_func(part, *args, **kwargs):
elements_number = len(part.index)
assert elements_number > 0, "Wrong mapping behaviour of MapReduce"
# unify value type
value = kwargs.pop("value")
value = np.array([value]) if is_scalar(value) else value
if elements_number == 1:
part = part[part.columns[0]]
else:
part = part.squeeze()
part_index_start = part.index.start
part_index_stop = part.index.stop
result = part.searchsorted(value=value, *args, **kwargs)
processed_results = {}
value_number = 0
for value_result in result:
value_result += part_index_start
if value_result > part_index_start and value_result < part_index_stop:
processed_results[f"value{value_number}"] = {
"relative_location": "current_partition",
"index": value_result,
}
elif value_result <= part_index_start:
processed_results[f"value{value_number}"] = {
"relative_location": "previous_partitions",
"index": part_index_start,
}
else:
processed_results[f"value{value_number}"] = {
"relative_location": "next_partitions",
"index": part_index_stop,
}
value_number += 1
return pandas.DataFrame(processed_results)
def reduce_func(map_results, *args, **kwargs):
def get_value_index(value_result):
value_result_grouped = value_result.groupby(level=0)
rel_location = value_result_grouped.get_group("relative_location")
ind = value_result_grouped.get_group("index")
# executes if result is inside of the mapped part
if "current_partition" in rel_location.values:
assert (
rel_location[rel_location == "current_partition"].count() == 1
), "Each value should have single result"
return ind[rel_location.values == "current_partition"]
# executes if result is between mapped parts
elif rel_location.nunique(dropna=False) > 1:
return ind[rel_location.values == "previous_partitions"][0]
# executes if result is outside of the mapped part
else:
if "next_partitions" in rel_location.values:
return ind[-1]
else:
return ind[0]
map_results_parsed = map_results.apply(
lambda ser: get_value_index(ser)
).squeeze()
if isinstance(map_results_parsed, pandas.Series):
map_results_parsed = map_results_parsed.to_list()
return pandas.Series(map_results_parsed)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# Dt map partitions operations
dt_date = MapFunction.register(_dt_prop_map("date"))
dt_time = MapFunction.register(_dt_prop_map("time"))
dt_timetz = MapFunction.register(_dt_prop_map("timetz"))
dt_year = MapFunction.register(_dt_prop_map("year"))
dt_month = MapFunction.register(_dt_prop_map("month"))
dt_day = MapFunction.register(_dt_prop_map("day"))
dt_hour = MapFunction.register(_dt_prop_map("hour"))
dt_minute = MapFunction.register(_dt_prop_map("minute"))
dt_second = MapFunction.register(_dt_prop_map("second"))
dt_microsecond = MapFunction.register(_dt_prop_map("microsecond"))
dt_nanosecond = MapFunction.register(_dt_prop_map("nanosecond"))
dt_week = MapFunction.register(_dt_prop_map("week"))
dt_weekofyear = MapFunction.register(_dt_prop_map("weekofyear"))
dt_dayofweek = MapFunction.register(_dt_prop_map("dayofweek"))
dt_weekday = MapFunction.register(_dt_prop_map("weekday"))
dt_dayofyear = MapFunction.register(_dt_prop_map("dayofyear"))
dt_quarter = MapFunction.register(_dt_prop_map("quarter"))
dt_is_month_start = MapFunction.register(_dt_prop_map("is_month_start"))
dt_is_month_end = MapFunction.register(_dt_prop_map("is_month_end"))
dt_is_quarter_start = MapFunction.register(_dt_prop_map("is_quarter_start"))
dt_is_quarter_end = MapFunction.register(_dt_prop_map("is_quarter_end"))
dt_is_year_start = MapFunction.register(_dt_prop_map("is_year_start"))
dt_is_year_end = MapFunction.register(_dt_prop_map("is_year_end"))
dt_is_leap_year = MapFunction.register(_dt_prop_map("is_leap_year"))
dt_daysinmonth = MapFunction.register(_dt_prop_map("daysinmonth"))
dt_days_in_month = MapFunction.register(_dt_prop_map("days_in_month"))
dt_tz = MapReduceFunction.register(
_dt_prop_map("tz"), lambda df: pandas.DataFrame(df.iloc[0]), axis=0
)
dt_freq = MapReduceFunction.register(
_dt_prop_map("freq"), lambda df: pandas.DataFrame(df.iloc[0]), axis=0
)
dt_to_period = MapFunction.register(_dt_func_map("to_period"))
dt_to_pydatetime = MapFunction.register(_dt_func_map("to_pydatetime"))
dt_tz_localize = MapFunction.register(_dt_func_map("tz_localize"))
dt_tz_convert = MapFunction.register(_dt_func_map("tz_convert"))
dt_normalize = MapFunction.register(_dt_func_map("normalize"))
dt_strftime = MapFunction.register(_dt_func_map("strftime"))
dt_round = MapFunction.register(_dt_func_map("round"))
dt_floor = MapFunction.register(_dt_func_map("floor"))
dt_ceil = MapFunction.register(_dt_func_map("ceil"))
dt_month_name = MapFunction.register(_dt_func_map("month_name"))
dt_day_name = MapFunction.register(_dt_func_map("day_name"))
dt_to_pytimedelta = MapFunction.register(_dt_func_map("to_pytimedelta"))
dt_total_seconds = MapFunction.register(_dt_func_map("total_seconds"))
dt_seconds = MapFunction.register(_dt_prop_map("seconds"))
dt_days = MapFunction.register(_dt_prop_map("days"))
dt_microseconds = MapFunction.register(_dt_prop_map("microseconds"))
dt_nanoseconds = MapFunction.register(_dt_prop_map("nanoseconds"))
dt_components = MapFunction.register(
_dt_prop_map("components"), validate_columns=True
)
dt_qyear = MapFunction.register(_dt_prop_map("qyear"))
dt_start_time = MapFunction.register(_dt_prop_map("start_time"))
dt_end_time = MapFunction.register(_dt_prop_map("end_time"))
dt_to_timestamp = MapFunction.register(_dt_func_map("to_timestamp"))
# END Dt map partitions operations
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
return self.__constructor__(self._modin_frame.astype(col_dtypes))
# Column/Row partitions reduce operations
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
def first_valid_index_builder(df):
return df.set_axis(
pandas.RangeIndex(len(df.index)), axis="index", inplace=False
).apply(lambda df: df.first_valid_index())
# We get the minimum from each column, then take the min of that to get
# first_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, first_valid_index_builder)
)
.min(axis=1)
.to_pandas()
.squeeze()
)
return self.index[first_result]
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
return df.set_axis(
pandas.RangeIndex(len(df.index)), axis="index", inplace=False
).apply(lambda df: df.last_valid_index())
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, last_valid_index_builder)
)
.max(axis=1)
.to_pandas()
.squeeze()
)
return self.index[first_result]
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
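# For example, `describe` collapses any number of rows into a fixed set of
# summary rows. A plain-pandas sketch, for illustration only:
#   pandas.DataFrame({"x": range(100)}).describe().shape   # -> (8, 1)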
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
empty_df = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.describe(**kwargs)
)
def describe_builder(df, internal_indices=[]):
return df.iloc[:, internal_indices].describe(**kwargs)
return self.__constructor__(
self._modin_frame._apply_full_axis_select_indices(
0,
describe_builder,
empty_df.columns,
new_index=empty_df.index,
new_columns=empty_df.columns,
)
)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
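# A plain-pandas sketch of why the full axis is needed (toy data only):
#   pandas.Series([1, 2, 3, 4]).cumsum()   # -> 1, 3, 6, 10
# Each value depends on every earlier value in the column, so a partition
# cannot be computed without the partitions above it.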
cummax = FoldFunction.register(pandas.DataFrame.cummax)
cummin = FoldFunction.register(pandas.DataFrame.cummin)
cumsum = FoldFunction.register(pandas.DataFrame.cumsum)
cumprod = FoldFunction.register(pandas.DataFrame.cumprod)
diff = FoldFunction.register(pandas.DataFrame.diff)
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
if is_list_like(lower) or is_list_like(upper):
new_modin_frame = self._modin_frame._fold(
axis, lambda df: df.clip(**kwargs)
)
else:
new_modin_frame = self._modin_frame._map(lambda df: df.clip(**kwargs))
return self.__constructor__(new_modin_frame)
def dot(self, other, squeeze_self=None, squeeze_other=None):
"""
Computes the matrix multiplication of self and other.
Parameters
----------
other : PandasQueryCompiler or NumPy array
The other query compiler or NumPy array to matrix multiply with self.
squeeze_self : boolean
The flag to squeeze self.
squeeze_other : boolean
The flag to squeeze other (this flag is applied if other is query compiler).
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the matrix multiply.
"""
if isinstance(other, PandasQueryCompiler):
other = (
other.to_pandas().squeeze(axis=1)
if squeeze_other
else other.to_pandas()
)
def map_func(df, other=other, squeeze_self=squeeze_self):
result = df.squeeze(axis=1).dot(other) if squeeze_self else df.dot(other)
if is_list_like(result):
return pandas.DataFrame(result)
else:
return pandas.DataFrame([result])
num_cols = other.shape[1] if len(other.shape) > 1 else 1
if len(self.columns) == 1:
new_index = (
["__reduced__"]
if (len(self.index) == 1 or squeeze_self) and num_cols == 1
else None
)
new_columns = ["__reduced__"] if squeeze_self and num_cols == 1 else None
axis = 0
else:
new_index = self.index
new_columns = ["__reduced__"] if num_cols == 1 else None
axis = 1
new_modin_frame = self._modin_frame._apply_full_axis(
axis, map_func, new_index=new_index, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def _nsort(self, n, columns=None, keep="first", sort_type="nsmallest"):
def map_func(df, n=n, keep=keep, columns=columns):
if columns is None:
return pandas.DataFrame(
getattr(pandas.Series, sort_type)(
df.squeeze(axis=1), n=n, keep=keep
)
)
return getattr(pandas.DataFrame, sort_type)(
df, n=n, columns=columns, keep=keep
)
if columns is None:
new_columns = ["__reduced__"]
else:
new_columns = self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def nsmallest(self, *args, **kwargs):
return self._nsort(sort_type="nsmallest", *args, **kwargs)
def nlargest(self, *args, **kwargs):
return self._nsort(sort_type="nlargest", *args, **kwargs)
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after applying expr.
"""
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
empty_eval = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.eval(expr, inplace=False, **kwargs)
)
if isinstance(empty_eval, pandas.Series):
new_columns = (
[empty_eval.name] if empty_eval.name is not None else ["__reduced__"]
)
else:
new_columns = empty_eval.columns
new_modin_frame = self._modin_frame._apply_full_axis(
1,
lambda df: pandas.DataFrame(df.eval(expr, inplace=False, **kwargs)),
new_index=self.index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df):
result = pandas.DataFrame(df.mode(**kwargs))
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if axis == 0 and len(df) != len(result):
# Pad rows
result = result.reindex(index=pandas.RangeIndex(len(df.index)))
elif axis == 1 and len(df.columns) != len(result.columns):
# Pad columns
result = result.reindex(columns=pandas.RangeIndex(len(df.columns)))
return | pandas.DataFrame(result) | pandas.DataFrame |
# coding=utf-8
# Author: <NAME>
# Date: June 17, 2020
#
# Description: Calculates entropy based on network PCA
#
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
| pd.set_option('display.width', 1000) | pandas.set_option |
from qd.cae.dyna import D3plot
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# import holoviews as hv
# from holoviews import dim, opts
#
# hv.extension('matplotlib')
# ==============================================================================
# D3plot basics
# ==============================================================================
if False:
# load just the geometry
d3plot = D3plot("TMP/d3plot", read_states="disp")
# iterate over nodes
for node in d3plot.get_nodes():
coords = node.get_coords()
# Looking at internal data that was not initially loaded
d3plot = D3plot("TMP/d3plot")
node = d3plot.get_nodeByID(1)
len(node.get_disp())
# Read displacements
d3plot.read_states("disp")
len(node.get_disp())
# Read plastic strain
d3plot.read_states("plastic_strain")
# read Strain
d3plot.read_states("strain")
# Plot to html
# part.plot(iTimestep=0, export_filepath="model.html")
# plottng to browser
part = d3plot.get_partByID(1)
part.plot(iTimestep=0)
d3plot.plot(iTimestep=-1)
d3plot.plot(iTimestep=0,
element_result="strain",
fringe_bounds=[0, 0.025])
# ==============================================================================
# Try on my own data
# ==============================================================================
if True:
# dir = "/media/martin/Stuff/research/MaterailModels/MM003/"
# file = "MM003_job02"
d3plot = D3plot("/media/martin/Stuff/research/MaterailModels/MM003"
"/MM003_job01.d3plot", read_states="disp")
# -----------print out the file info ---------------------------------
# print(
# "{} nodes \n"
# "{} elements \n"
# "{} time steps \n".format(
# d3plot.get_nNodes(),
# d3plot.get_nElements(),
# d3plot.get_nTimesteps()
# )
# )
d3plot.info()
cords = []
for node in d3plot.get_nodes():
cords.append(node.get_coords())
initial_shape = []
for node in cords:
initial_shape.append(node[-1])
df = | pd.DataFrame(initial_shape, columns=['x', 'y', 'z']) | pandas.DataFrame |
# Neural network for pop assignment
# Load packages
import tensorflow.keras as tf
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
import numpy as np
import pandas as pd
import allel
import zarr
import h5py
from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
import itertools
import shutil
import sys
import os
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sn
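# Hypothetical usage sketch for hyper_tune below (file paths are placeholders,
# not taken from this project):
#   best_mod, y_train, y_val = hyper_tune(
#       infile="genotypes.vcf",
#       sample_data="samples.txt",
#       max_trials=5,
#       seed=42,
#   )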
def hyper_tune(
infile,
sample_data,
max_trials=10,
runs_per_trial=10,
max_epochs=100,
train_prop=0.8,
seed=None,
save_dir="out",
mod_name="hyper_tune",
):
"""
Tunes hyperparameters of keras model for population assignment.
Parameters
----------
infile : string
Path to VCF file containing genetic data.
sample_data : string
Path to tab-delimited file containing columns x, y,
pop, and sampleID.
max_trials : int
Number of trials to run for RandomSearch (Default=10).
runs_per_trial : int
Number of runs per trial for RandomSearch (Default=10).
max_epochs : int
Number of epochs to train model (Default=100).
train_prop : float
Proportion of data to train on. Remaining data will be kept
as a test set and not used until final model is trained
(Default=0.8).
seed : int
Random seed (Default=None).
save_dir : string
Directory to save output to (Default='out').
mod_name : string
Name of model in save directory (Default='hyper_tune').
Returns
-------
best_mod : keras sequential model
Best model from hyperparameter tuning
y_train : pd.DataFrame
Training labels.
y_val : pd.DataFrame
Validation labels
"""
# Check input types
if os.path.exists(infile) is False:
raise ValueError("infile does not exist")
if os.path.exists(sample_data) is False:
raise ValueError("sample_data does not exist")
if isinstance(max_trials, np.int) is False:
raise ValueError("max_trials should be integer")
if isinstance(runs_per_trial, np.int) is False:
raise ValueError("runs_per_trial should be integer")
if isinstance(max_epochs, np.int) is False:
raise ValueError("max_epochs should be integer")
if isinstance(train_prop, np.float) is False:
raise ValueError("train_prop should be float")
if isinstance(seed, np.int) is False and seed is not None:
raise ValueError("seed should be integer or None")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be string")
if isinstance(mod_name, str) is False:
raise ValueError("mod_name should be string")
# Create save_dir if doesn't already exist
print(f"Output will be saved to: {save_dir}")
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
# Read data
samp_list, dc = read_data(
infile=infile,
sample_data=sample_data,
save_allele_counts=False,
kfcv=True,
)
# Train prop can't be greater than num samples
if len(dc) * (1 - train_prop) < len(np.unique(samp_list["pops"])):
raise ValueError("train_prop is too high; not enough samples for test")
# Create test set that will be used to assess model performance later
X_train_0, X_test, y_train_0, y_test = train_test_split(
dc, samp_list, stratify=samp_list["pops"], train_size=train_prop
)
# Save train and test set to save_dir
np.save(save_dir + "/X_train.npy", X_train_0)
y_train_0.to_csv(save_dir + "/y_train.csv", index=False)
np.save(save_dir + "/X_test.npy", X_test)
y_test.to_csv(save_dir + "/y_test.csv", index=False)
# Split data into training and hold-out test set
X_train, X_val, y_train, y_val = train_test_split(
dc,
samp_list,
stratify=samp_list["pops"],
train_size=train_prop,
random_state=seed,
)
# Make sure all classes represented in y_val
if len(np.unique(y_train["pops"])) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
hypermodel = classifierHyperModel(
input_shape=X_train.shape[1], num_classes=len(popnames)
)
tuner = RandomSearch(
hypermodel,
objective="val_loss",
seed=seed,
max_trials=max_trials,
executions_per_trial=runs_per_trial,
directory=save_dir,
project_name=mod_name,
)
tuner.search(
X_train - 1,
y_train_enc,
epochs=max_epochs,
validation_data=(X_val - 1, y_val_enc),
)
best_mod = tuner.get_best_models(num_models=1)[0]
tuner.get_best_models(num_models=1)[0].save(save_dir + "/best_mod")
return best_mod, y_train, y_val
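# Hypothetical usage sketch for kfcv below (file paths are placeholders,
# not taken from this project):
#   report = kfcv(
#       infile="genotypes.vcf",
#       sample_data="samples.txt",
#       n_splits=5,
#       n_reps=1,
#       ensemble=False,
#   )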
def kfcv(
infile,
sample_data,
mod_path=None,
n_splits=5,
n_reps=5,
ensemble=False,
save_dir="kfcv_output",
return_plot=True,
save_allele_counts=False,
**kwargs,
):
"""
Runs K-fold cross-validation to get an accuracy estimate of the model.
Parameters
----------
infile : string
Path to VCF or hdf5 file with genetic information
for all samples (including samples of unknown origin).
sample_data : string
Path to input file with all samples present (including
samples of unknown origin), which is a tab-delimited
text file with columns x, y, pop, and sampleID.
n_splits : int
Number of folds in k-fold cross-validation
(Default=5).
n_reps : int
Number of times to repeat k-fold cross-validation,
creating the number of models in the ensemble
(Default=5).
ensemble : bool
Whether to use an ensemble of models or a single model (Default=False).
save_dir : string
Directory where results will be stored (Default='kfcv_output').
return_plot : boolean
Returns a confusion matrix of correct assignments (Default=True).
save_allele_counts : boolean
Whether or not to store derived allele counts in hdf5
file (Default=False).
**kwargs
Keyword arguments for pop_finder function.
Returns
-------
report : pd.DataFrame
Classification report for all models.
ensemble_report : pd.DataFrame
Classification report for ensemble of models.
"""
# Check inputs
# Check is sample_data path exists
if os.path.exists(sample_data) is False:
raise ValueError("path to sample_data incorrect")
# Check that the infile path exists
if os.path.exists(infile) is False:
raise ValueError("path to infile does not exist")
# Check data types
if isinstance(n_splits, np.int) is False:
raise ValueError("n_splits should be an integer")
if isinstance(n_reps, np.int) is False:
raise ValueError("n_reps should be an integer")
if isinstance(ensemble, bool) is False:
raise ValueError("ensemble should be a boolean")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be a string")
# Check nsplits is > 1
if n_splits <= 1:
raise ValueError("n_splits must be greater than 1")
samp_list, dc = read_data(
infile=infile,
sample_data=sample_data,
save_allele_counts=save_allele_counts,
kfcv=True,
)
popnames = np.unique(samp_list["pops"])
# Check there are more samples in the smallest pop than n_splits
if n_splits > samp_list.groupby(["pops"]).agg(["count"]).min().values[0]:
raise ValueError(
"n_splits cannot be greater than number of samples in smallest pop"
)
# Create stratified k-fold
rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_reps)
pred_labels = []
true_labels = []
pred_labels_ensemble = []
true_labels_ensemble = []
ensemble_preds = pd.DataFrame()
preds = pd.DataFrame()
fold_var = 1
for t, v in rskf.split(dc, samp_list["pops"]):
# Subset train and validation data
X_train = dc[t, :] - 1
X_val = dc[v, :] - 1
y_train = samp_list.iloc[t]
y_val = samp_list.iloc[v]
if ensemble:
test_dict, tot_bag_df = pop_finder(
X_train,
y_train,
X_val,
y_val,
save_dir=save_dir,
ensemble=True,
**kwargs,
)
# Unit tests for results from pop_finder
if bool(test_dict) is False:
raise ValueError("Empty dictionary from pop_finder")
if tot_bag_df.empty:
raise ValueError("Empty dataframe from pop_finder")
if len(test_dict) == 1:
raise ValueError(
"pop_finder results consists of single dataframe\
however ensemble set to True"
)
ensemble_preds = ensemble_preds.append(tot_bag_df)
else:
test_dict = pop_finder(
X_train,
y_train,
X_val,
y_val,
save_dir=save_dir,
**kwargs,
)
# Unit tests for results from pop_finder
if bool(test_dict) is False:
raise ValueError("Empty dictionary from pop_finder")
if len(test_dict["df"]) != 1:
raise ValueError(
"pop_finder results contains ensemble of models\
should be a single dataframe"
)
preds = preds.append(test_dict["df"][0])
tmp_pred_label = []
tmp_true_label = []
for i in range(0, len(test_dict["df"])):
tmp_pred_label.append(
test_dict["df"][i].iloc[
:, 0:len(popnames)
].idxmax(axis=1).values
)
tmp_true_label.append(test_dict["df"][i]["true_pops"].values)
if ensemble:
pred_labels_ensemble.append(
tot_bag_df.iloc[:, 0:len(popnames)].idxmax(axis=1).values
)
true_labels_ensemble.append(tmp_true_label[0])
pred_labels.append(np.concatenate(tmp_pred_label, axis=0))
true_labels.append(np.concatenate(tmp_true_label, axis=0))
fold_var += 1
# return pred_labels, true_labels
pred_labels = np.concatenate(pred_labels)
true_labels = np.concatenate(true_labels)
report = classification_report(
true_labels, pred_labels, zero_division=1, output_dict=True
)
report = pd.DataFrame(report).transpose()
report.to_csv(save_dir + "/classification_report.csv")
if ensemble:
ensemble_preds.to_csv(save_dir + "/ensemble_preds.csv")
true_labels_ensemble = np.concatenate(true_labels_ensemble)
pred_labels_ensemble = np.concatenate(pred_labels_ensemble)
ensemble_report = classification_report(
true_labels_ensemble,
pred_labels_ensemble,
zero_division=1,
output_dict=True,
)
ensemble_report = pd.DataFrame(ensemble_report).transpose()
ensemble_report.to_csv(
save_dir + "/ensemble_classification_report.csv")
else:
preds.to_csv(save_dir + "/preds.csv")
if return_plot is True:
cm = confusion_matrix(true_labels, pred_labels, normalize="true")
cm = np.round(cm, 2)
plt.style.use("default")
plt.figure()
plt.imshow(cm, cmap="Blues")
plt.colorbar()
plt.ylabel("True Pop")
plt.xlabel("Pred Pop")
plt.title("Confusion Matrix")
tick_marks = np.arange(len(np.unique(true_labels)))
plt.xticks(tick_marks, np.unique(true_labels))
plt.yticks(tick_marks, np.unique(true_labels))
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.savefig(save_dir + "/cm.png")
plt.close()
if ensemble:
# Plot second confusion matrix
cm = confusion_matrix(
true_labels_ensemble, pred_labels_ensemble, normalize="true"
)
cm = np.round(cm, 2)
plt.style.use("default")
plt.figure()
plt.imshow(cm, cmap="Blues")
plt.colorbar()
plt.ylabel("True Pop")
plt.xlabel("Pred Pop")
plt.title("Confusion Matrix")
tick_marks = np.arange(len(np.unique(true_labels)))
plt.xticks(tick_marks, np.unique(true_labels))
plt.yticks(tick_marks, np.unique(true_labels))
thresh = cm.max() / 2.0
for i, j in itertools.product(
range(cm.shape[0]), range(cm.shape[1])
):
plt.text(
j,
i,
cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.savefig(save_dir + "/ensemble_cm.png")
plt.close()
if ensemble:
return report, ensemble_report
else:
return report
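# Hypothetical usage sketch for pop_finder below (X_*/y_* are placeholders
# produced elsewhere, e.g. by read_data plus train_test_split):
#   test_dict = pop_finder(X_train, y_train, X_test, y_test,
#                          save_dir="out", max_epochs=50)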
def pop_finder(
X_train,
y_train,
X_test,
y_test,
unknowns=None,
ukgen=None,
ensemble=False,
try_stacking=False,
nbags=10,
train_prop=0.8,
mod_path=None,
predict=False,
save_dir="out",
save_weights=False,
patience=20,
batch_size=32,
max_epochs=100,
gpu_number="0",
plot_history=False,
seed=None,
):
"""
Trains classifier neural network, calculates accuracy on
test set, and makes predictions.
Parameters
----------
X_train: np.array
Array of genetic data corresponding to train samples.
y_train: pd.DataFrame
Dataframe of train samples, including columns for samples and pops.
X_test: np.array
Array of genetic data corresponding to test samples.
y_test: pd.DataFrame
Dataframe of test samples, including columns for samples and pops.
unknowns: pd.DataFrame
Dataframe of unknowns calculated from read_data (Default=None).
ukgen : np.array
Array of genetic data corresponding to unknown samples
(Default=None).
ensemble : boolean
If set to true, will train an ensemble of models using
bootstrap aggregating (Default=False).
try_stacking : boolean
Use weights to influence ensemble model decisions. Must have
ensemble set to True to use. Use caution: with low test set sizes,
can be highly inaccurate and overfit (Default=False).
nbags : int
Number of "bags" (models) to create for the bootstrap
aggregating algorithm. This option only needs to be set if
ensemble is set to True (Default=10).
train_prop : float
Proportion of samples used in training (Default=0.8).
mod_path : string
Default=None
predict : boolean
Predict on unknown data. Must have unknowns in sample_data to use
this feature (Default=False).
save_dir : string
Directory to save results to (Default="out").
save_weights : boolean
Save model weights for later use (Default=False).
patience : int
How many epochs to wait before early stopping if loss has not
improved (Default=20).
batch_size : int
Default=32,
max_epochs : int
Default=100
gpu_number : string
Not in use yet, coming soon (Default="0").
plot_history : boolean
Plot training / validation history (Default=False).
seed : int
Random seed for splitting data (Default=None).
Returns
-------
test_dict : dict
Dictionary with test results.
tot_bag_df : pd.DataFrame
Dataframe with test results from ensemble.
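Examples
--------
Illustrative sketch only; assumes `X_train`, `y_train`, `X_test` and
`y_test` were produced by this package's data-splitting helpers and
match the shapes described above (the argument values shown are not a
tested recipe):

>>> test_dict, tot_bag_df = pop_finder(
... X_train, y_train, X_test, y_test,
... ensemble=True, nbags=10, save_dir="out",
... )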
"""
print(f"Output will be saved to: {save_dir}")
# Check if data is in right format
if isinstance(y_train, pd.DataFrame) is False:
raise ValueError("y_train is not a pandas dataframe")
if y_train.empty:
raise ValueError("y_train exists, but is empty")
if isinstance(y_test, pd.DataFrame) is False:
raise ValueError("y_test is not a pandas dataframe")
if y_test.empty:
raise ValueError("y_test exists, but is empty")
if isinstance(X_train, np.ndarray) is False:
raise ValueError("X_train is not a numpy array")
if len(X_train) == 0:
raise ValueError("X_train exists, but is empty")
if isinstance(X_test, np.ndarray) is False:
raise ValueError("X_test is not a numpy array")
if len(X_test) == 0:
raise ValueError("X_test exists, but is empty")
if isinstance(ensemble, bool) is False:
raise ValueError("ensemble should be a boolean")
if isinstance(try_stacking, bool) is False:
raise ValueError("try_stacking should be a boolean")
if isinstance(nbags, int) is False:
raise ValueError("nbags should be an integer")
if isinstance(train_prop, float) is False:
raise ValueError("train_prop should be a float")
if isinstance(predict, bool) is False:
raise ValueError("predict should be a boolean")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be a string")
if isinstance(save_weights, bool) is False:
raise ValueError("save_weights should be a boolean")
if isinstance(patience, int) is False:
raise ValueError("patience should be an integer")
if isinstance(batch_size, int) is False:
raise ValueError("batch_size should be an integer")
if isinstance(max_epochs, int) is False:
raise ValueError("max_epochs should be an integer")
if isinstance(plot_history, bool) is False:
raise ValueError("plot_history should be a boolean")
if isinstance(mod_path, str) is False and mod_path is not None:
raise ValueError("mod_path should be a string or None")
# Create save directory
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
# If unknowns are not none
if unknowns is not None:
# Check if exists
if isinstance(unknowns, pd.DataFrame) is False:
raise ValueError("unknowns is not pandas dataframe")
if unknowns.empty:
raise ValueError("unknowns exists, but is empty")
if isinstance(ukgen, np.ndarray) is False:
raise ValueError("ukgen is not a numpy array")
if len(ukgen) == 0:
raise ValueError("ukgen exists, but is empty")
uksamples = unknowns["sampleID"].to_numpy()
# Add info about test samples
y_test_samples = y_test["samples"].to_numpy()
y_test_pops = y_test["pops"].to_numpy()
# One hot encode test values
enc = OneHotEncoder(handle_unknown="ignore")
y_test_enc = enc.fit_transform(
y_test["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
# results storage
TEST_LOSS = []
TEST_ACCURACY = []
TEST_95CI = []
yhats = []
ypreds = []
test_dict = {"count": [], "df": []}
pred_dict = {"count": [], "df": []}
top_pops = {"df": [], "pops": []}
if ensemble:
for i in range(nbags):
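# each bag is a bootstrap resample (with replacement) of roughly 80% of the training rows; resampling repeats until every population appears at least twice so the stratified train/validation split below can be made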
n_prime = int(np.ceil(len(X_train) * 0.8))
good_bag = False
while good_bag is False:
bag_X = np.zeros(shape=(n_prime, X_train.shape[1]))
bag_y = pd.DataFrame({"samples": [], "pops": [], "order": []})
for j in range(0, n_prime):
ind = np.random.choice(len(X_train))
bag_X[j] = X_train[ind]
bag_y = bag_y.append(y_train.iloc[ind])
dup_pops_df = bag_y.groupby(["pops"]).agg(["count"])
if (
pd.Series(popnames).isin(bag_y["pops"]).all()
and (dup_pops_df[("samples", "count")] > 1).all()
):
# Create validation set from training set
bag_X, X_val, bag_y, y_val = train_test_split(
bag_X, bag_y, stratify=bag_y["pops"],
train_size=train_prop
)
if (
pd.Series(popnames).isin(bag_y["pops"]).all()
and pd.Series(popnames).isin(y_val["pops"]).all()
):
good_bag = True
enc = OneHotEncoder(handle_unknown="ignore")
bag_y_enc = enc.fit_transform(
bag_y["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
if mod_path is None:
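# default architecture: batch-normalised input, two blocks of three 128-unit ELU layers separated by 25% dropout, and a softmax output over the populations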
model = tf.Sequential()
model.add(tf.layers.BatchNormalization(
input_shape=(bag_X.shape[1],)))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dropout(0.25))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(128, activation="elu"))
model.add(tf.layers.Dense(len(popnames), activation="softmax"))
aopt = tf.optimizers.Adam(lr=0.0005)
model.compile(
loss="categorical_crossentropy",
optimizer=aopt,
metrics="accuracy"
)
else:
model = tf.models.load_model(mod_path + "/best_mod")
# Create callbacks
checkpointer = tf.callbacks.ModelCheckpoint(
filepath=save_dir + "/checkpoint.h5",
verbose=1,
# save_best_only=True,
save_weights_only=True,
monitor="val_loss",
# monitor="loss",
save_freq="epoch",
)
earlystop = tf.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=patience
)
reducelr = tf.callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.2,
patience=int(patience / 3),
verbose=1,
mode="auto",
min_delta=0,
cooldown=0,
min_lr=0,
)
callback_list = [checkpointer, earlystop, reducelr]
# Train model
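# note: inputs are shifted by one (e.g. 0/1/2 genotype counts become -1/0/1) for training, evaluation and prediction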
history = model.fit(
bag_X - 1,
bag_y_enc,
batch_size=int(batch_size),
epochs=int(max_epochs),
callbacks=callback_list,
validation_data=(X_val - 1, y_val_enc),
verbose=0,
)
# Load best model
model.load_weights(save_dir + "/checkpoint.h5")
if not save_weights:
os.remove(save_dir + "/checkpoint.h5")
# plot training history
if plot_history:
plt.switch_backend("agg")
fig = plt.figure(figsize=(3, 1.5), dpi=200)
plt.rcParams.update({"font.size": 7})
ax1 = fig.add_axes([0, 0, 1, 1])
ax1.plot(
history.history["val_loss"][3:],
"--",
color="black",
lw=0.5,
label="Validation Loss",
)
ax1.plot(
history.history["loss"][3:],
"-",
color="black",
lw=0.5,
label="Training Loss",
)
ax1.set_xlabel("Epoch")
ax1.legend()
fig.savefig(
save_dir + "/model" + str(i) + "_history.pdf",
bbox_inches="tight"
)
plt.close()
test_loss, test_acc = model.evaluate(X_test - 1, y_test_enc)
yhats.append(model.predict(X_test - 1))
test_df = pd.DataFrame(model.predict(X_test - 1))
test_df.columns = popnames
test_df["sampleID"] = y_test_samples
test_df["true_pops"] = y_test_pops
test_df["bag"] = i
test_dict["count"].append(i)
test_dict["df"].append(test_df)
# Fill test lists with information
TEST_LOSS.append(test_loss)
TEST_ACCURACY.append(test_acc)
if predict:
ypreds.append(model.predict(ukgen))
tmp_df = pd.DataFrame(model.predict(ukgen))
tmp_df.columns = popnames
tmp_df["sampleID"] = uksamples
tmp_df["bag"] = i
pred_dict["count"].append(i)
pred_dict["df"].append(tmp_df)
# Find top populations for each sample
top_pops["df"].append(i)
top_pops["pops"].append(
pred_dict["df"][i].iloc[
:, 0:len(popnames)
].idxmax(axis=1)
)
# Collect yhats and ypreds for weighted ensemble
yhats = np.array(yhats)
if predict:
ypreds = np.array(ypreds)
# Get ensemble accuracy
tot_bag_df = test_dict["df"][0].iloc[
:, 0:len(popnames)
].copy()
for i in range(1, len(test_dict["df"])):
tot_bag_df += test_dict["df"][i].iloc[:, 0:len(popnames)]
# Normalize values to be between 0 and 1
tot_bag_df = tot_bag_df / nbags
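# tot_bag_df now holds, for each test sample, the assignment probability to each population averaged across bags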
tot_bag_df["top_samp"] = tot_bag_df.idxmax(axis=1)
tot_bag_df["sampleID"] = test_dict["df"][0]["sampleID"]
tot_bag_df["true_pops"] = test_dict["df"][0]["true_pops"]
ENSEMBLE_TEST_ACCURACY = np.sum(
tot_bag_df["top_samp"] == tot_bag_df["true_pops"]
) / len(tot_bag_df)
tot_bag_df.to_csv(save_dir + "/ensemble_test_results.csv")
if predict:
top_pops_df = pd.DataFrame(top_pops["pops"])
top_pops_df.columns = uksamples
top_freqs = {"sample": [], "freq": []}
for samp in uksamples:
top_freqs["sample"].append(samp)
top_freqs["freq"].append(
top_pops_df[samp].value_counts() / len(top_pops_df)
)
# Save frequencies to csv for plotting
top_freqs_df = pd.DataFrame(top_freqs["freq"]).fillna(0)
top_freqs_df.to_csv(save_dir + "/pop_assign_freqs.csv")
# Create table of assignments by frequency
freq_df = pd.concat(
[
| pd.DataFrame(top_freqs["freq"]) | pandas.DataFrame |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
import mlflow
import mlflow.sklearn
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
def eval_metrics(actual, pred):
rmse = np.sqrt(mean_squared_error(actual, pred))
mae = mean_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
if __name__ == "__main__":
df = pd.read_csv("./WA_Fn-UseC_-Telco-Customer-Churn.csv")
# print(df.head())
# print(df.corr())
df['TotalCharges'].replace(to_replace=' ', value=np.NaN, inplace=True) # find and replace missing value with np.NaN
df['TotalCharges'] = pd.to_numeric(df['TotalCharges']) # convert the data column to numeric dtype
tc_median = df['TotalCharges'].median() # calculate median
df['TotalCharges'].fillna(tc_median, inplace=True) # replace missing value with median value
ndf = df.copy() # create a new copy of dataframe
# print(tc_median)
bool_cols = [col for col in df.columns if col not in ['gender','SeniorCitizen'] and len(df[col].unique()) == 2] # identify boolean columns
# print(bool_cols) # boolean columns
for col in bool_cols: # iterate through boolean columns
ndf[col] = np.where(ndf[col]=='No',0, 1) # replace Yes/No values with 1/0
ndf['gender'] = np.where(ndf['gender']=='Female', 0, 1) # replace Female/Male with 0/1, this is also known as binary encoding
ndf.drop('customerID', axis=1, inplace=True) # drop primary key / id column from the table
other_cat_cols = [col for col in ndf.select_dtypes('object').columns if col not in bool_cols and col not in ['customerID', 'gender', 'SeniorCitizen']] # find the remaining categorical columns
# print(other_cat_cols)
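# one-hot encode the remaining multi-category columns so that every feature in the model matrix is numeric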
ndf_dummies = | pd.get_dummies(ndf) | pandas.get_dummies |
import numpy as np
import pandas as pd
from hotspot import sim_data
from hotspot import Hotspot
def test_models():
"""
Ensure each model runs
"""
# Simulate some data
N_CELLS = 100
N_DIM = 10
N_GENES = 10
latent = sim_data.sim_latent(N_CELLS, N_DIM)
latent = pd.DataFrame(
latent,
index=['Cell{}'.format(i+1) for i in range(N_CELLS)]
)
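# the latent DataFrame above has one row per simulated cell (Cell1..Cell100) and one column per latent dimension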
umi_counts = sim_data.sim_umi_counts(N_CELLS, 2000, 200)
umi_counts = | pd.Series(umi_counts) | pandas.Series |
# -*- coding: utf-8 -*-
"""Make a curation sheet for the bioregistry."""
import pandas as pd
import bioregistry
from bioregistry.constants import BIOREGISTRY_MODULE
def descriptions():
"""Make a curation sheet for descriptions."""
columns = [
"prefix",
"name",
"homepage",
"deprecated",
"description",
]
path = BIOREGISTRY_MODULE.join("curation", name="descriptions.tsv")
rows = []
for prefix in bioregistry.read_registry():
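# keep only prefixes that are missing a description but have a homepage a curator can consult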
if bioregistry.get_description(prefix):
continue
homepage = bioregistry.get_homepage(prefix)
if homepage is None:
continue
deprecated = bioregistry.is_deprecated(prefix)
rows.append(
(
prefix,
bioregistry.get_name(prefix),
homepage,
"x" if deprecated else "",
"",
)
)
df = pd.DataFrame(rows, columns=columns)
df.to_csv(path, sep="\t")
def examples():
"""Make a curation sheet for examples."""
columns = [
"prefix",
"name",
"homepage",
"deprecated",
"example",
]
rows = []
for prefix in bioregistry.read_registry():
if bioregistry.get_example(prefix):
continue
homepage = bioregistry.get_homepage(prefix)
if homepage is None:
continue
deprecated = bioregistry.is_deprecated(prefix)
rows.append(
(
prefix,
bioregistry.get_name(prefix),
homepage,
"x" if deprecated else "",
"",
)
)
df = pd.DataFrame(rows, columns=columns)
path = BIOREGISTRY_MODULE.join("curation", name="examples.tsv")
df.to_csv(path, sep="\t")
def homepages():
"""Make a curation sheet for homepages."""
columns = [
"prefix",
"name",
"deprecated",
"homepage",
]
path = BIOREGISTRY_MODULE.join("curation", name="homepages.tsv")
rows = []
for prefix in bioregistry.read_registry():
homepage = bioregistry.get_homepage(prefix)
if homepage is not None:
continue
deprecated = bioregistry.is_deprecated(prefix)
rows.append(
(
prefix,
bioregistry.get_name(prefix),
"x" if deprecated else "",
homepage,
)
)
df = | pd.DataFrame(rows, columns=columns) | pandas.DataFrame |
import glob
import os
import sys
# these imports and usings need to be in the same order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
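"""
Forecast the mobility, micro-distancing, mask-wearing and vaccination
components and combine them into transmission potential (TP) forecasts
for each jurisdiction, saving figures and CSV output along the way.
"""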
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
# Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
# cap min and max at historical or (-50,0)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
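# baseline (no scenario) case: blend a random-walk step along the recent trend with a pull back towards the long-run baseline; the trend weight decays linearly over the forecast horizon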
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
# set adjusted baselines by eyeline for now, need to get this automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
# forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
## mask-wearing compliance is forecast below in the same way as micro-distancing; whether it is retained in the model will need to be reassessed in future
# forecast mask wearing compliance
# Get a baseline value of microdistancing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
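# moment-match a Beta(a_vax, b_vax) distribution to the forecast mean VE with fixed variance var_vax, then draw mob_samples realisations for each day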
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
elif i == len(predictors) + 1: # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the ve time series and create an adjusted timeseries from March 1st
# that includes no effect prior
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
# a single row of ones covering the dates before the vaccination data begins; it is re-indexed for each of the 8 jurisdictions below
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = {state}
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
# a single row of ones covering the dates before the vaccination data begins; it is re-indexed for each of the 8 jurisdictions below
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = {state}
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
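# exponential-decay form: the TP multiplier is (1 + theta_md) ** (-prop), so full compliance (prop = 1) scales TP by 1 / (1 + theta_md)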
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract 10 days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
# count the days in each state's fitting window that fall on or after the Omicron start date (the else 0 handles states whose window has no Omicron overlap)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
# df_R is already sorted by date; subset it to this state so that rows are dates and columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
masks_prop_sim = df_masks[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
# tile the micro-distancing and mask-wearing parameters to shape (n_days, mob_samples)
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
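# ve_delta is fitted as a single vector concatenated across the third-wave states, so the
# cumulative day counts above let us recover each state's slice via vax_idx_ranges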
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# construct a range of dates for Omicron which starts at the later of that state's third-wave start date and the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
(v >= pd.to_datetime(omicron_start_date)).sum()
for v in third_date_range.values()
],
)
)
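# days_into_omicron holds the cumulative count of Omicron-era days across states; the ranges
# built below pick out each state's Omicron block from the concatenated fitting window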
idx = {}
kk = 0
for k in third_date_range.keys():
idx[k] = range(days_into_omicron[kk], days_into_omicron[kk + 1])
kk += 1
# combined variant (VoC) and vaccination effect through time (filled in per posterior sample below)
voc_vacc_product = np.zeros_like(vacc_ts_delta)
# calculate the voc effects
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# sample the right R_L
sim_R = samples["R_Li[" + state_key[state] + "]"].values
for n in range(mob_samples):
# add gaussian noise to predictors before forecast
# df_state.loc[
df_state.loc[df_state.date < mob_forecast_date, predictors] = (
state_Rmed[state][:, :, n] / 100
)
# add gaussian noise to predictors after forecast
df_state.loc[df_state.date >= mob_forecast_date, predictors] = (
state_sims[state][:, :, n] / 100
)
## ADVANCED SCENARIO MODELLING - USE ONLY FOR POINT ESTIMATES
# set non-grocery values to 0
if advanced_scenario_modelling:
df_state.loc[:, predictors[0]] = 0
df_state.loc[:, predictors[2]] = 0
df_state.loc[:, predictors[3]] = 0
df_state.loc[:, predictors[4]] = 0
df1 = df_state.loc[df_state.date <= ban]
X1 = df1[predictors] # N by K
md[: X1.shape[0], :] = 1
if n == 0:
# initialise arrays (loggodds)
# N by K times (Nsamples by K )^T = Ndate by Nsamples
logodds = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
logodds = np.append(logodds, X2 @ post_values[:, n], axis=0)
logodds = np.append(logodds, X3 @ post_values[:, n], axis=0)
else:
# concatenate to the pre-existing logodds matrix
logodds1 = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
prop2 = df_md.loc[ban:new_pol, state].values
prop3 = df_md.loc[new_pol:, state].values
logodds2 = X2 @ post_values[:, n]
logodds3 = X3 @ post_values[:, n]
logodds_sample = np.append(logodds1, logodds2, axis=0)
logodds_sample = np.append(logodds_sample, logodds3, axis=0)
# concatenate to previous
logodds = np.vstack((logodds, logodds_sample))
# create a matrix of mob_samples realisations which is an indicator of the VoC (Delta right now)
# which will be 1 up until the voc_start_date and then take values from the posterior sample
voc_multiplier_alpha = samples["voc_effect_alpha"].values
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# number of days into omicron forecast
tt = 0
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
tmp_date = pd.to_datetime("2020-03-01")
# get the correct Omicron start date
# omicron_start_date_tmp = np.maximum(
# pd.to_datetime(omicron_start_date),
# pd.to_datetime(third_date_range[state][0]),
# )
omicron_start_date_tmp = pd.to_datetime(omicron_start_date)
omicron_start_day_tmp = (
pd.to_datetime(omicron_start_date_tmp) - pd.to_datetime(start_date)
).days
for ii in range(mob_samples):
# before Omicron is introduced in a jurisdiction, we consider which period we're in:
# 1. Wildtype
# 2. Alpha
# 3. Delta
voc_vacc_product[:, ii] = vacc_ts_delta[:, ii]
idx_start = df_state.loc[df_state.date < alpha_start_date].shape[0]
idx_end = df_state.loc[df_state.date < delta_start_date].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_alpha[ii]
idx_start = idx_end
idx_end = df_state.loc[df_state.date < omicron_start_date_tmp].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
idx_start = idx_end
idx_end = np.shape(voc_vacc_product)[0]
if strain == "Delta":
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
elif strain == "Omicron":
# if omicron we need to account for the Omicron VE prior to the introduction of
# omicron in mid November
voc_vacc_product[idx_start:idx_end, ii] = (
vacc_ts_omicron[idx_start:idx_end, ii] * voc_multiplier_omicron[ii]
)
# save the components of the TP
pd.DataFrame(sim_R).to_csv(results_dir + "baseline_R_L_" + strain + ".csv")
pd.DataFrame(md).to_csv(results_dir + "md_" + strain + ".csv")
pd.DataFrame(masks).to_csv(results_dir + "masks_" + strain + ".csv")
macro = 2 * expit(logodds.T)
pd.DataFrame(macro).to_csv(results_dir + "macro_" + strain + ".csv")
pd.DataFrame(voc_vacc_product).to_csv(results_dir + "voc_vacc_product_" + strain + ".csv")
# calculate TP
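# TP is the product of:
#   2 * expit(logodds.T) -- the macro/mobility effect, a multiplier in (0, 2)
#   md, masks            -- micro-distancing and mask-wearing multipliers
#   sim_R                -- posterior draws of the baseline R_Li for this state
#   voc_vacc_product     -- combined variant (VoC) and vaccination effect over time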
R_L = (
2 * expit(logodds.T)
* md
* masks
* sim_R
* voc_vacc_product
)
# now we increase TP by 15% based on school reopening (this code can probably be reused,
# but inferring the effect would be pretty difficult due to lockdowns
# and various interruptions since March 2020)
if scenarios[state] == "school_opening_2022":
R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :] = (
1.15 * R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :]
)
# calculate summary stats
R_L_med = np.median(R_L, axis=1)
R_L_lower = np.percentile(R_L, 25, axis=1)
R_L_upper = np.percentile(R_L, 75, axis=1)
R_L_bottom = np.percentile(R_L, 5, axis=1)
R_L_top = np.percentile(R_L, 95, axis=1)
# R_L
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend([typ] * df_state.shape[0])
state_Rs["date"].extend(dd.values) # repeat mob_samples times?
state_Rs["lower"].extend(R_L_lower)
state_Rs["median"].extend(R_L_med)
state_Rs["upper"].extend(R_L_upper)
state_Rs["top"].extend(R_L_top)
state_Rs["bottom"].extend(R_L_bottom)
state_Rs["mean"].extend(np.mean(R_L, axis=1))
state_Rs["std"].extend(np.std(R_L, axis=1))
state_R[state] = R_L
# generate a summary for the R_I
for state in states:
# R_I
if strain == "Delta":
R_I = samples["R_I"].values[:df_state.shape[0]]
elif strain == "Omicron":
# if Omicron period, then we need to multiply in the VoC effect as there's a period
# in the fitting where Delta and Omicron overlap (i.e. R_I = R_I * P(t) where P(t) is
# a product term).
R_I = samples["R_I_omicron"].values[:df_state.shape[0]]
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend(["R_I"] * df_state.shape[0])
state_Rs["date"].extend(dd.values)
state_Rs["lower"].extend(np.repeat(np.percentile(R_I, 25), df_state.shape[0]))
state_Rs["median"].extend(np.repeat(np.median(R_I), df_state.shape[0]))
state_Rs["upper"].extend(np.repeat(np.percentile(R_I, 75), df_state.shape[0]))
state_Rs["top"].extend(np.repeat(np.percentile(R_I, 95), df_state.shape[0]))
state_Rs["bottom"].extend(np.repeat(np.percentile(R_I, 5), df_state.shape[0]))
state_Rs["mean"].extend(np.repeat(np.mean(R_I), df_state.shape[0]))
state_Rs["std"].extend(np.repeat(np.std(R_I), df_state.shape[0]))
df_Rhats = pd.DataFrame().from_dict(state_Rs)
df_Rhats = df_Rhats.set_index(["state", "date", "type"])
d = pd.DataFrame()
for state in states:
for i, typ in enumerate(forecast_type):
if i == 0:
t = pd.DataFrame.from_dict(state_R[state])
t["date"] = dd.values
t["state"] = state
t["type"] = typ
else:
temp = pd.DataFrame.from_dict(state_R[state])
temp["date"] = dd.values
temp["state"] = state
temp["type"] = typ
t = t.append(temp)
# R_I
if strain == "Delta":
# use the Delta import reproduction number before Omicron starts
i = pd.DataFrame(np.tile(samples["R_I"].values, (len(dd.values), 1)))
elif strain == "Omicron":
# use the Omicron import reproduction number after Omicron starts
i = pd.DataFrame(np.tile(samples["R_I_omicron"].values, (len(dd.values), 1)))
i["date"] = dd.values
i["type"] = "R_I"
i["state"] = state
t = t.append(i)
d = d.append(t)
d = d.set_index(["state", "date", "type"])
df_Rhats = df_Rhats.join(d)
df_Rhats = df_Rhats.reset_index()
df_Rhats.state = df_Rhats.state.astype(str)
df_Rhats.type = df_Rhats.type.astype(str)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=6 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last six months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_6_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=12 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last twelve months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
print("============")
print("Saving results")
print("============")
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_12_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
# save values for the functional omicron related proportions for each state
prop_omicron_vars = ("r", "tau", "m0", "m1")
for (kk, state) in enumerate(states):
# sort df_R by date so that rows are dates. rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state].copy()
for v in prop_omicron_vars:
# posterior draws of this parameter for this state (truncated to mob_samples when saved)
y = samples[v + "[" + str(kk + 1) + "]"].values
pd.DataFrame(y[:mob_samples]).to_csv(
results_dir
+ v
+ "_"
+ state
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# now we save the sampled TP paths
# convert the appropriate sampled susceptible depletion factors to a csv and save them for simulation
# NOTE: this will not save an updated median, mean etc for the R_I's. We don't use it so it's not
# really important but it should be noted for later if we are comparing things. The step function
# R_I -> R_I_omicron is noticeable and shouldn't be overlooked.
df_Rhats = df_Rhats[
["state", "date", "type", "median", "bottom", "lower", "upper", "top"]
+ [i for i in range(mob_samples)]
]
# # save the file as a csv (easier to handle in Julia for now)
df_Rhats.to_csv(
results_dir
+ "soc_mob_R_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
return None
def calculate_Reff_local(
Reff,
R_I,
R_I_omicron,
voc_effect,
prop_import,
omicron_start_day,
):
"""
Apply the same mixture model idea as per the TP model to get
R_eff^L = (R_eff - rho * RI)/(1 - rho)
and use this to weight the TP historically.
"""
# calculate this all in one step. Note that we set the local Reff to 0 if
# the prop_import = 1, as in that instance the relationship breaks due to division by 0.
Reff_local = np.zeros(shape=Reff.shape[0])
for n in range(len(Reff_local)):
# choose the import reproduction number appropriate to the time period of interest
if n < omicron_start_day:
R_I_tmp = R_I
else:
R_I_tmp = R_I_omicron * voc_effect
if prop_import[n] < 1:
Reff_local[n] = (Reff[n] - prop_import[n] * R_I_tmp) / (1 - prop_import[n])
else:
Reff_local[n] = 0
# Reff_local = [
# (Reff[t] - prop_import[t] * R_I) / (1 - prop_import[t])
# if prop_import[t] < 1 else -1 for t in range(Reff.shape[0])
# ]
return Reff_local
def adjust_TP(data_date):
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
n_days_nowcast_TP_adjustment,
mob_samples,
)
print("============")
print("Adjusting TP forecasts using data from", data_date)
print("============")
data_date = pd.to_datetime(data_date)
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
sim_start_date = pd.to_datetime(sim_start_date)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start=third_start_date, end=third_end_date).values,
"NT": pd.date_range(start="2021-12-01", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-11-25", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-08-01", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
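# backfill so that each (roughly weekly) survey value applies to the preceding days,
# mirroring the mask-wearing handling further below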
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the Google data and then reloading it to strip
# the datetime dtype (the date column is re-parsed below)
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
# Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
omicron_start_day = (pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)).days
for strain in ("Delta", "Omicron"):
"""
Run adjustment model for the local TP estimates. This will adjust the local component of the
TP
"""
print("=========================")
print("Running TP adjustment model for", strain, "TP")
print("=========================")
df_forecast2 = pd.read_csv(
results_dir
+ "soc_mob_R_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# read in Reff samples
df_Reff = pd.read_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["INFECTION_DATES"],
)
inferred_prop_imports = pd.read_csv(
results_dir
+ "rho_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# read in the case data and note that we want this to be infection dates to match up to Reff changes
case_data = read_in_NNDSS(
data_date, apply_delay_at_read=True, apply_inc_at_read=True
)
case_data = case_data[["date_inferred", "STATE", "imported", "local"]]
# this is the forecasted TP dataframe, without R_L type
df_forecast2_new = df_forecast2.loc[df_forecast2.type != "R_L"]
end_date = pd.to_datetime(today) + timedelta(days=num_forecast_days)
states_to_adjust = ["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"]
# read in the samples for weighting between TP and Reff.
samples2 = pd.read_csv(
results_dir
+ "posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# extract the import values
if strain == "Delta":
R_I = samples2.R_I.to_numpy()
R_I_omicron = samples2.R_I_omicron.to_numpy()
voc_effect = samples2.voc_effect_delta.to_numpy()
elif strain == "Omicron":
# extract the import values
R_I_omicron = samples2.R_I_omicron.to_numpy()
voc_effect = samples2.voc_effect_omicron.to_numpy()
last_date_for_reff = (
pd.to_datetime(data_date)
- pd.Timedelta(days=truncation_days + n_days_nowcast_TP_adjustment - 1)
)
print("==============")
print("The last date the Reff estimate is used is", last_date_for_reff)
print("==============")
for state in states:
# filter case data by state
case_data_state = case_data.loc[case_data.STATE == state]
# take a sum of cases each day (this does not fill out missing days)
df_cases = case_data_state.groupby(["date_inferred", "STATE"]).agg(sum)
df_cases = df_cases.reset_index()
df_cases = df_cases.set_index("date_inferred")
# now we want to fill out indices by adding 0's on days with 0 cases and ensuring we go right up to the current truncated date
idx = pd.date_range(
pd.to_datetime("2020-03-01"),
last_date_for_reff,
)
is_omicron = np.array(idx >= pd.to_datetime(omicron_start_date))
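# flag, for each date in the index, whether it falls on or after the Omicron start date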
df_cases = df_cases.reindex(idx, fill_value=0)
# filter the TP and Reff by state
df_forecast2_state_R_L = df_forecast2.loc[
((df_forecast2.state == state) & (df_forecast2.type == "R_L"))
]
df_Reff_state = df_Reff.loc[df_Reff.STATE == state]
# take a rolling average of the cases over the interval of consideration
idx = (pd.to_datetime(df_forecast2_state_R_L.date) >= pd.to_datetime("2020-03-01")) & (
| pd.to_datetime(df_forecast2_state_R_L.date) | pandas.to_datetime |
from .get_tmy_epw_file import get_tmy_epw_file
from .get_noaa_isd_lite_file import get_noaa_isd_lite_file
from .meteorology import Meteorology
from .analyze_noaa_isd_lite_file import analyze_noaa_isd_lite_file
import tempfile
import pandas as pd
import numpy as np
import os
import pkg_resources
from typing import Tuple
from calendar import isleap
from ._logging import _logger
# We buffer this path so that we don't create tons of temporary directories if the function is called many
# times, and so that calling it multiple times with the same WMO/year combination won't result in the same
# file being generated multiple times.
_tempdir_amy_epw = tempfile.mkdtemp()
def create_amy_epw_file(
wmo_index: int,
year: int,
*,
max_records_to_interpolate: int = 6,
max_records_to_impute: int = 48,
max_missing_amy_rows: int = 700,
amy_epw_dir: str = None,
tmy_epw_dir: str = None,
amy_dir: str = None,
amy_files: Tuple[str, str] = None,
allow_downloads: bool = False
) -> str:
"""
Combine data from a Typical Meteorological Year (TMY) EPW file and Actual Meteorological Year (AMY)
observed data to generate an AMY EPW file for a single calendar year at a given WMO.
:param wmo_index: The WMO Index of the weather station for which the EPW file should be generated.
Currently only weather stations in the United States are supported.
:param year: The year for which the EPW should be generated
:param amy_epw_dir: The directory into which the generated AMY EPW file should be written.
If not defined, a temporary directory will be created
:param tmy_epw_dir: The source directory for TMY EPW files. If a file for the requested WMO Index is
already present, it will be used. Otherwise a TMY EPW file will be downloaded (see this package's
get_tmy_epw_file() function for details). If no directory is given, the package's default
directory (in data/tmy_epw_files/ in the package's directory) will be used, which will allow TMY
files to be reused for future calls instead of downloading them repeatedly, which is quite time
consuming.
:param amy_dir: The source directory for AMY files. If a file for the requested WMO Index and year
is already present, it will be used. Otherwise an AMY file will be downloaded (see this package's
get_noaa_isd_lite_file() function for details). If no directory is given, the package's default
directory (in data/ in the package's directory) will be used, which will allow AMY files to be
reused for future calls instead of downloading them repeatedly, which is quite time consuming.
:param amy_files: Instead of specifying amy_dir and allowing this method to try to find the appropriate
file, you can use this argument to specify the actual files that should be used. There should be
two files - the first the AMY file for "year", and the second the AMY file for the subsequent year,
which is required to support shifting the timezone from GMT to the timezone of the observed meteorology.
:param max_records_to_interpolate: The maximum length of sequence for which linear interpolation will be
used to replace missing values. See the documentation of _handle_missing_values() below for details.
:param max_records_to_impute: The maximum length of sequence for which imputation will be used to replace
missing values. See the documentation of _handle_missing_values() below for details.
:param max_missing_amy_rows: The maximum total number of missing rows to permit in a year's AMY file.
:param allow_downloads: If this is set to True, then any missing TMY or AMY files required to generate the
requested AMY EPW file will be downloaded from publicly available online catalogs. Otherwise, those files
being missing will result in an error being raised.
:return: The absolute path of the generated AMY EPW file
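Example (illustrative only -- the WMO index and year below are placeholders):
    >>> epw_path = create_amy_epw_file(724940, 2018, allow_downloads=True)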
"""
if amy_dir is not None and amy_files is not None:
raise Exception("It is not possible to specify both amy_dir and amy_files")
if amy_epw_dir is None:
global _tempdir_amy_epw
amy_epw_dir = _tempdir_amy_epw
_logger.info(f"No amy_epw_dir was specified - generated AMY EPWs will be stored in {amy_epw_dir}")
# Either amy_files is specified, in which case we use the specified paths, or amy_dir is specified,
# in which case we will search that directory for AMY files, or neither is specified, in which case
# we will fall back to a generated temporary directory.
if amy_files is not None:
for p in amy_files:
if not os.path.exists(p):
raise Exception(f'Path {p} does not exist')
amy_file_path, amy_next_year_file_path = amy_files
else:
if amy_dir is None:
amy_dir = pkg_resources.resource_filename("diyepw", "data/noaa_isd_lite_files")
_logger.info(f"No amy_dir was specified - downloaded AMY files will be stored in the default location at {amy_dir}")
amy_file_path = get_noaa_isd_lite_file(wmo_index, year, output_dir=amy_dir, allow_downloads=allow_downloads)
amy_next_year_file_path = get_noaa_isd_lite_file(wmo_index, year+1, output_dir=amy_dir, allow_downloads=allow_downloads)
if max_missing_amy_rows is not None:
amy_file_analysis = analyze_noaa_isd_lite_file(amy_file_path)
if amy_file_analysis['total_rows_missing'] > max_missing_amy_rows:
raise Exception(f"File is missing {amy_file_analysis['total_rows_missing']} rows, but maximum allowed is {max_missing_amy_rows}")
# Read in the corresponding TMY3 EPW file.
tmy_epw_file_path = get_tmy_epw_file(wmo_index, tmy_epw_dir, allow_downloads=allow_downloads)
tmy = Meteorology.from_tmy3_file(tmy_epw_file_path)
# If the year we are generating an AMY EPW for is a leap year, then we need to add the leap day to the TMY data.
# We'll do that by adding the day with all empty values, then using the same routine we do to interpolate/impute
# missing data in our AMY files to fill in the missing data.
if isleap(year):
_logger.info(f"{year} is a leap year, using the interpolation strategy to populate TMY data for Feb. 29")
for hour in range(1, 25):
col_names = tmy.observations.columns.to_list()
new_row_vals = [1982, 2, 29, hour, 0]
new_row_vals.extend(np.repeat(np.nan, len(col_names) - len(new_row_vals)))
new_row = pd.DataFrame([new_row_vals], columns=col_names)
tmy.observations = tmy.observations.append(new_row)
# We sort by month, day and hour. We do *not* sort by year, because the year column doesn't matter and because
# it is in any case not consistent throughout a TMY data set
tmy.observations = tmy.observations.sort_values(by=["month", "day", "hour"])
#TODO: This is where I left off Thursday night. This call is changing the data types of the date fields into
# floating point values, which breaks the EPW file
_handle_missing_values(
tmy.observations,
max_to_interpolate=0, # We only want the imputation strategy to be used for the 24 missing hours
max_to_impute=24,
step=1,
imputation_range=14 * 24, # Two weeks, in hours
imputation_step=24,
ignore_columns=["Flags"] # The TMY files we use seem to be missing data for this field entirely
)
amy_epw_file_name = f"{tmy.country}_{tmy.state}_{tmy.city}.{tmy.station_number}_AMY_{year}.epw"
amy_epw_file_name = amy_epw_file_name.replace(" ", "-")
amy_epw_file_path = os.path.join(amy_epw_dir, amy_epw_file_name)
if os.path.exists(amy_epw_file_path):
_logger.info(f"File already exists at {amy_epw_file_path}, so a new one won't be generated.")
return amy_epw_file_path
# Read in the NOAA AMY file for the station for the requested year as well as the first 23 hours (sufficient
# to handle the largest possible timezone shift) of the subsequent year - the subsequent year's data will be
# used to populate the last hours of the year because of the time shift that we perform, which moves the first
# hours of January 1 into the final hours of December 31.
amy_df = pd.read_csv(amy_file_path, delim_whitespace=True, header=None)
amy_next_year_df = pd.read_csv(amy_next_year_file_path, delim_whitespace=True, header=None, nrows=23)
amy_df = pd.concat([amy_df, amy_next_year_df]).reset_index(drop=True)
amy_df = _set_noaa_df_columns(amy_df)
amy_df = _create_timestamp_index_for_noaa_df(amy_df)
# Shift the timestamp (index) to match the time zone of the WMO station.
amy_df = amy_df.shift(periods= tmy.timezone_gmt_offset, freq='H')
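# after the shift, the 23 rows borrowed from the following year fill the final hours of
# Dec 31 that would otherwise be left empty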
# Remove time steps that aren't applicable to the year of interest
amy_df = _map_noaa_df_to_year(amy_df, year)
_handle_missing_values(
amy_df,
step= | pd.Timedelta("1h") | pandas.Timedelta |
'''Perform clustering of single particle images based on their latent representations generated by cryoDRGN or cryoSPARC.'''
import sys
import os
import pickle
import numpy as np
import pandas as pd
import cryopicls
def main():
args = cryopicls.args.clustering.parse_args()
# Load particle metadata
if args.cryodrgn:
# Input is cryoDRGN result
if os.path.splitext(args.metadata)[1] == '.csg':
md = cryopicls.data_handling.cryosparc.CryoSPARCMetaData.load(
args.metadata)
elif os.path.splitext(args.metadata)[1] == '.star':
md = cryopicls.data_handling.relion.RelionMetaData.load(
args.metadata)
else:
sys.exit(
f'--metadata {args.metadata} is neither a cryoSPARC group file nor a RELION star file!'
)
elif args.cryosparc:
# Input is cryoSPARC 3D variability job
md = cryopicls.data_handling.cryosparc.CryoSPARCMetaData.load(
args.threedvar_csg)
# Load latent representations, Z
if args.cryodrgn:
Z = cryopicls.data_handling.cryodrgn.load_latent_variables(args.z_file)
elif args.cryosparc:
cs_file, _ = cryopicls.data_handling.cryosparc.get_metafiles_from_csg(args.threedvar_csg)
Z = cryopicls.data_handling.cryosparc.load_latent_variables(
cs_file, args.threedvar_num_components)
# Initialize clustering model
if args.algorithm == 'auto-gmm':
model = cryopicls.clustering.autogmm.AutoGMMClustering(**vars(args))
elif args.algorithm == 'x-means':
model = cryopicls.clustering.xmeans.XMeansClustering(**vars(args))
elif args.algorithm == 'k-means':
model = cryopicls.clustering.kmeans.KMeansClustering(**vars(args))
elif args.algorithm == 'g-means':
model = cryopicls.clustering.gmeans.GMeansClustering(**vars(args))
elif args.algorithm == 'manual':
thresh_list = cryopicls.clustering.manual_select.parse_thresh_args(**vars(args))
model = cryopicls.clustering.manual_select.ManualSelector(thresh_list)
# Do clustering
fitted_model, cluster_labels, cluster_centers = model.fit(Z)
# Save metadatas and model
os.makedirs(args.output_dir, exist_ok=True)
# The best model
with open(os.path.join(args.output_dir,
f'{args.output_file_rootname}_model.pkl'), 'wb') as f:
pickle.dump(fitted_model, f, protocol=4)
# Cluster centers
np.savetxt(
os.path.join(args.output_dir, f'{args.output_file_rootname}_cluster_centers.txt'),
cluster_centers)
# Coordinates in Z nearest to the cluster centers
label_list = np.unique(cluster_labels)
nearest_points = []
for i, cluster_center in enumerate(cluster_centers):
label = label_list[i]
Z_cluster_center = Z[np.nonzero(cluster_labels == label)[0]]
_, nearest_point = cryopicls.utils.nearest_in_array(
Z_cluster_center, cluster_center)
nearest_points.append(nearest_point)
np.savetxt(
os.path.join(
args.output_dir,
f'{args.output_file_rootname}_nearest_points_to_cluster_centers.txt'
), nearest_points)
# Metadata and Z of each cluster
for label in label_list:
idxs = np.nonzero(cluster_labels == label)[0]
md_cluster = md.iloc(idxs)
md_cluster.write(args.output_dir,
f'{args.output_file_rootname}_cluster{label:03d}')
Z_cluster = Z[idxs]
np.save(
os.path.join(args.output_dir,
f'{args.output_file_rootname}_cluster{label:03d}_Z'),
Z_cluster)
# Save Z and cluster_labels as dataframe (input for cryopicls_visualizer)
col_names = [f'dim_{x}' for x in range(1, Z.shape[1] + 1)]
df = pd.concat([
pd.DataFrame(data=Z, columns=col_names),
| pd.Series(data=cluster_labels, name='cluster') | pandas.Series |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
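# interleaving every third record breaks the contiguous column ordering, producing an
# unsorted records array used to exercise is_sorted() and sort()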
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
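        # a_inv stores the same records in reverse order, so combining it with a is expected to raise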
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
| pd.Index(['a', 'b', 'c', 'd'], dtype='object') | pandas.Index |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import pandas as pd
import traceback as tb
def json_to_eventlog(file_path):
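    """Flatten a JSON file of chess matches into an event-log DataFrame.

    Each match is expected to provide a "moves" list plus "White" and "Black"
    player names; the result holds one row per move with game number, player,
    color, move and turn index.
    """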
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
game = []
player = []
color = []
move = []
timestamp_1 = []
timestamp_2 = []
game_counter = 1
for match in data:
move_counter = 1
for mv in match["moves"]:
game.append(game_counter)
c = "White"
if move_counter % 2 == 0:
c = "Black"
move_counter = move_counter + 1
move.append(mv)
timestamp_1.append(move_counter-1)
#timestamp_2.append(match["timestamp"][move_counter-2])
color.append(c)
player.append(match[c])
game_counter = game_counter + 1
#df = pd.DataFrame({"game":game, "player":player, "color":color, "move":move, "turn":timestamp_1, "timestamp":timestamp_2})
df = | pd.DataFrame({"game":game, "player":player, "color":color, "move":move, "turn":timestamp_1}) | pandas.DataFrame |
import datetime as dt
from functools import wraps
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_series_equal
import seaice.nasateam as nt
import seaice.sedna.sedna as sedna
from seaice.sedna.cube import ConcentrationCube as Cube
TestCase.maxDiff = None
TODAY_PERIOD = pd.Period(dt.date.today(), 'D')
class mock_today(object):
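    """Decorator that patches seaice.sedna.sedna.dt_date so that
    dt_date.today() returns the given fixed date for the duration of a test."""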
def __init__(self, year, month, day):
self.date = dt.date(year, month, day)
def __call__(self, func):
@wraps(func)
def func_wrapper(*args):
with patch('seaice.sedna.sedna.dt_date') as mock_date:
mock_date.today.return_value = self.date
mock_date.side_effect = lambda *args_, **kw: dt.date(*args_, **kw)
return func(*args)
return func_wrapper
class Test__poly_fit_delta(TestCase):
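    """Tests for _poly_fit_delta which, as exercised here, returns the gap between
    the last observed value and a polynomial fit of the preceding series, with
    NaNs interpolated and a missing target yielding NaN."""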
def _add_index(self, series):
index = pd.period_range(start=dt.date(2001, 1, 1), periods=len(series), freq='D')
series.index = index
return series
def test_valid_data(self):
s = self._add_index(pd.Series([0, 1, 2, 3, 4]))
actual = sedna._poly_fit_delta(s)
expected = 0
self.assertAlmostEqual(expected, actual)
def test_delta(self):
s = self._add_index(pd.Series([0, 5, 10, 15, 15]))
expected = -5
actual = sedna._poly_fit_delta(s)
self.assertAlmostEqual(expected, actual)
def test_interpolate_missing_correctly(self):
s = self._add_index(pd.Series([0, 1, 2, np.nan, np.nan, 5.1]))
actual = sedna._poly_fit_delta(s)
expected = .1
self.assertAlmostEqual(expected, actual, delta=.00001)
def test_interpolate_missing_beginning(self):
s = self._add_index(pd.Series([np.nan, np.nan, 3, 4, 5, 6, 7.5]))
actual = sedna._poly_fit_delta(s)
expected = .5
self.assertAlmostEqual(expected, actual, delta=.00001)
def test_missing_target_returns_nan(self):
s = self._add_index(pd.Series([1, 2, 3, 4, 5, 6, np.nan, np.nan]))
actual = sedna._poly_fit_delta(s)
self.assertTrue(np.isnan(actual))
def test_huge_delta(self):
s = self._add_index(pd.Series([1, 2, 3, np.nan, np.nan, 60]))
expected = 54
actual = sedna._poly_fit_delta(s)
self.assertAlmostEqual(expected, actual)
class Test__set_failed_qa_flag(TestCase):
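    """Tests for _set_failed_qa_flag: rows inside the first `eval_days` window are
    left blank, while later rows are flagged True when data is missing or the
    extent deviates from the recent trend by more than `regression_delta_km2`."""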
eval_days = 3
regression_delta_km2 = 1
def _generate_expected_series(self, index, expected_values):
return pd.Series(expected_values, index, name='failed_qa')
def _set_up_input_frame(self, extents):
test_index = pd.period_range(start='2015-01-01', periods=len(extents), freq='D')
test_frame = pd.DataFrame(data={'total_extent_km2': extents,
'failed_qa': [''] * len(extents),
'filename': [['foo']] * len(extents)},
index=test_index)
return test_frame
def test_set_failed_qa_flag_if_file_blank(self):
extents_length = 5
test_frame = self._set_up_input_frame(np.arange(extents_length))
test_frame['filename'] = [[]] * extents_length
frame = sedna._set_failed_qa_flag(test_frame, self.eval_days, self.regression_delta_km2)
expected_values = ['', '', '', False, False]
expected_series = self._generate_expected_series(frame.index, expected_values)
assert_series_equal(frame['failed_qa'], expected_series)
def test_valid_data(self):
test_frame = self._set_up_input_frame(np.arange(11))
frame = sedna._set_failed_qa_flag(test_frame, self.eval_days, self.regression_delta_km2)
expected_values = ['', '', '', False, False, False, False, False, False, False, False]
expected_series = self._generate_expected_series(frame.index, expected_values)
assert_series_equal(frame['failed_qa'], expected_series)
def test_missing_data_marked(self):
test_frame = self._set_up_input_frame([0, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, 10])
frame = sedna._set_failed_qa_flag(test_frame, self.eval_days, self.regression_delta_km2)
expected_values = ['', '', '', False, True, True, '', '', False, False, False]
expected_series = self._generate_expected_series(frame.index, expected_values)
assert_series_equal(frame['failed_qa'], expected_series)
def test_missing_data_spike(self):
eval_days = 5
test_frame = self._set_up_input_frame([1, 2, 3, np.nan, np.nan, 60, 7, 8, 9, 10])
frame = sedna._set_failed_qa_flag(test_frame, eval_days, self.regression_delta_km2)
expected_values = ['', '', '', '', '', True, False, False, False, False]
expected_series = self._generate_expected_series(frame.index, expected_values)
assert_series_equal(frame['failed_qa'], expected_series)
def test_missing_data_with_bad_trailing_data(self):
test_frame = self._set_up_input_frame([1, 2, 3, np.nan, np.nan, 1000, 10000])
frame = sedna._set_failed_qa_flag(test_frame, self.eval_days, self.regression_delta_km2)
expected_values = ['', '', '', True, True, '', '']
expected_series = self._generate_expected_series(frame.index, expected_values)
assert_series_equal(frame['failed_qa'], expected_series)
def test_mixed_data(self):
test_frame = self._set_up_input_frame([1, 2, 3, 4, 5, 6, 100, -50, 9, 10])
frame = sedna._set_failed_qa_flag(test_frame, 4, self.regression_delta_km2)
expected_values = ['', '', '', '', False, False, True, True, False, False]
expected_series = self._generate_expected_series(frame.index, expected_values)
assert_series_equal(frame['failed_qa'], expected_series)
def test_missing_initial_data(self):
test_frame = self._set_up_input_frame([np.nan, np.nan, 3, 4, 5, 6, 7])
frame = sedna._set_failed_qa_flag(test_frame, self.eval_days, self.regression_delta_km2)
expected_values = ['', '', '', '', False, False, False]
expected_series = self._generate_expected_series(frame.index, expected_values)
assert_series_equal(frame['failed_qa'], expected_series)
class Test__create_row(TestCase):
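    """Tests for _create_row, which packs extent/area/missing totals, file metadata
    and optional regional stats into a {(period, hemisphere): row} dict."""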
def test_works(self):
period = pd.Period(dt.date(2010, 1, 4), 'D')
extent = 13512223.12312
area = 12305092.906
missing = 626.5
metadata = {'files': ['/nt_something_nrt_.bin', '/nt_something_else_nrt_.bin']}
expected = {(period, 'N'): {'total_area_km2': 12305092.906,
'total_extent_km2': 13512223.123,
'filename': ['/nt_something_nrt_.bin',
'/nt_something_else_nrt_.bin'],
'source_dataset': 'nsidc-0081',
'missing_km2': 626.5,
'failed_qa': False}}
actual = sedna._create_row((period, 'N'), extent, area, missing, metadata,
failed_qa=False)
npt.assert_almost_equal(actual[(period, 'N')].pop('total_area_km2'),
expected[(period, 'N')].pop('total_area_km2'))
npt.assert_almost_equal(actual[(period, 'N')].pop('total_extent_km2'),
expected[(period, 'N')].pop('total_extent_km2'))
npt.assert_almost_equal(actual[(period, 'N')].pop('missing_km2'),
expected[(period, 'N')].pop('missing_km2'))
self.assertEqual(actual, expected)
def test_works_with_monthly(self):
period = pd.Period(dt.date(2010, 1, 4), 'M')
extent = 13512223.12312
area = 12305092.906
missing = 626.5
metadata = {'files': ['/nt_something_nrt_.bin', '/nt_something_else_nrt_.bin']}
expected = {(period, 'N'): {'total_area_km2': 12305092.906,
'total_extent_km2': 13512223.123,
'filename': ['/nt_something_nrt_.bin',
'/nt_something_else_nrt_.bin'],
'source_dataset': 'nsidc-0081',
'missing_km2': 626.5}}
actual = sedna._create_row((period, 'N'), extent, area, missing, metadata)
npt.assert_almost_equal(actual[(period, 'N')].pop('total_area_km2'),
expected[(period, 'N')].pop('total_area_km2'))
npt.assert_almost_equal(actual[(period, 'N')].pop('total_extent_km2'),
expected[(period, 'N')].pop('total_extent_km2'))
npt.assert_almost_equal(actual[(period, 'N')].pop('missing_km2'),
expected[(period, 'N')].pop('missing_km2'))
self.assertEqual(actual, expected)
def test_with_all_masked_extent(self):
period = pd.Period(dt.date(2010, 1, 4), 'D')
extent = np.ma.masked
area = np.ma.masked
missing = 75660222.409
metadata = {'files': ['/nt_something_nrt_.bin', '/nt_something_nrt_else.bin']}
expected = {(period, 'N'): {'total_area_km2': np.ma.masked,
'total_extent_km2': np.ma.masked,
'filename': ['/nt_something_nrt_.bin',
'/nt_something_nrt_else.bin'],
'source_dataset': 'nsidc-0081',
'missing_km2': 75660222.409,
'failed_qa': False}}
actual = sedna._create_row((period, 'N'), extent, area, missing, metadata,
failed_qa=False)
npt.assert_almost_equal(actual[(period, 'N')].pop('missing_km2'),
expected[(period, 'N')].pop('missing_km2'))
self.assertEqual(actual, expected)
def test_with_regional_stats(self):
period = pd.Period(dt.date(2010, 1, 4), 'D')
extent = 13512223.12312
area = 12305092.906
missing = 626.5
metadata = {'files': ['/nt_something_nrt_.bin', '/nt_something_else_nrt_.bin']}
regional_stats = [('Hudson', 20, 4, 1)]
expected = {(period, 'N'): {'total_area_km2': 12305092.906,
'total_extent_km2': 13512223.123,
'filename': ['/nt_something_nrt_.bin',
'/nt_something_else_nrt_.bin'],
'source_dataset': 'nsidc-0081',
'missing_km2': 626.5,
'Hudson_area_km2': 4,
'Hudson_extent_km2': 20,
'Hudson_missing_km2': 1,
'failed_qa': False}}
actual = sedna._create_row((period, 'N'), extent, area, missing, metadata,
regional_stats, failed_qa=False)
npt.assert_almost_equal(actual[(period, 'N')].pop('total_area_km2'),
expected[(period, 'N')].pop('total_area_km2'))
npt.assert_almost_equal(actual[(period, 'N')].pop('total_extent_km2'),
expected[(period, 'N')].pop('total_extent_km2'))
npt.assert_almost_equal(actual[(period, 'N')].pop('missing_km2'),
expected[(period, 'N')].pop('missing_km2'))
self.assertEqual(actual, expected)
class Test_daily_df_for_monthly_statistics(TestCase):
def config(self, hemi):
return {'hemisphere': {'short_name': hemi}}
def _build_mock_frame(self):
period_index = pd.period_range(start='1978-10-31', end='2017-06-12')
count = len(period_index)
df = pd.DataFrame({
'date': list(period_index) * 2,
'hemisphere': (['N'] * count) + (['S'] * count),
'total_extent_km2': ([1] * count) + ([2] * count)
})
df = df.set_index(['date', 'hemisphere'])
return df
@mock_today(2017, 6, 13)
@patch('seaice.sedna.sedna._dataframe_from_data_store_daily')
def test_cuts_off_incomplete_months(self, mock__dataframe_from_data_store_daily):
mock__dataframe_from_data_store_daily.return_value = self._build_mock_frame()
actual = sedna._daily_df_for_monthly_statistics(self.config('N'))
self.assertEqual(actual.index[0], | pd.Period('1978-11-01', freq='D') | pandas.Period |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# viewer.py - View aggregated i2p network statistics.
# Author: <NAME> <<EMAIL>>
# License: This is free and unencumbered software released into the public domain.
#
# NOTE: This file should never write to the database, only read.
import argparse
import datetime
import math
import matplotlib
# We don't want matplotlib to use X11 (https://stackoverflow.com/a/3054314)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sqlite3
import pandas as pd
from jinja2 import Environment, FileSystemLoader
interval = 3600 # = 60 minutes
num_intervals = 24 # = 20 hours
min_version = 20
min_country = 20
# http://i2p-projekt.i2p/en/docs/how/network-database#routerInfo
# H is left out since it's almost always empty.
#netdb_caps = ['f','H','K','L','M','N','O','P','R','U','X',]
netdb_caps = ['f','K','L','M','N','O','P','R','U','X',]
# Used in the plots.
generation_time = str(datetime.datetime.utcnow())[:-7]
site = 'http://nacl.i2p/stats'
ONE_DAY = 5*24*60*60
FIVE_DAYS = 5*24*60*60
FIFTEEN_DAYS = 15*24*60*60
THIRTY_DAYS = 30*24*60*60
ACTIVE_TIME = THIRTY_DAYS
def query_db(conn, query, args=(), one=False):
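    """Run `query` against the SQLite connection and return all rows,
    or only the first row when `one` is True."""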
cur = conn.execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
# TODO: Port to Pandas
def pie_graph(conn, query, output, title='', lower=0, log=False):
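    """Draw a pie chart from the (label, value) rows returned by `query`.

    Rows with a value <= `lower` are skipped, values are optionally log-scaled,
    then normalized to fractions before the figure is saved to `output`.
    """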
labels = []
sizes = []
res = query_db(conn, query)
# Sort so the graph doesn't look like complete shit.
res = sorted(res, key=lambda tup: tup[1])
for row in res:
if row[1] > lower:
labels.append(row[0])
if log:
sizes.append(math.log(row[1]))
else:
sizes.append(row[1])
# Normalize.
norm = [float(i)/sum(sizes) for i in sizes]
plt.pie(norm,
labels=labels,
shadow=True,
startangle=90,
)
plt.figtext(.1,.03,'{}\n{} UTC'.format(site,generation_time))
plt.axis('equal')
plt.legend()
plt.title(title)
plt.savefig(output)
plt.close()
def plot_x_y(conn, query, output, title='', xlab='', ylab=''):
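    """Plot the result of `query` as x/y values with the given title and axis
    labels and save the figure to `output`."""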
df = | pd.read_sql_query(query, conn) | pandas.read_sql_query |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
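    """Shared fixtures: two randomly generated person-like frames (A and B),
    a full candidate-pair MultiIndex, and a temporary directory."""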
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
        # returns a DataFrame
        assert isinstance(result, DataFrame)
        # resulting frame has a MultiIndex
        assert isinstance(result.index, MultiIndex)
        # index names are correct
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
        # returns a DataFrame
        assert isinstance(result, DataFrame)
        # resulting frame has a MultiIndex
        assert isinstance(result.index, MultiIndex)
        # index names are correct
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_parallel_comparing_api(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
def test_parallel_comparing(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=4)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_4processes = comp.compute(self.index_AB, self.A, self.B)
result_4processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
pdt.assert_frame_equal(result_single, result_4processes)
def test_pickle(self):
# test if it is possible to pickle the Compare class
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.numeric('number', 'number')
comp.geo('lat', 'lng', 'lat', 'lng')
comp.date('before', 'after')
# do the test
pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle')
pickle.dump(comp, open(pickle_path, 'wb'))
def test_manual_parallel_joblib(self):
# test if it is possible to pickle the Compare class
# This is only available for python 3. For python 2, it is not
# possible to pickle instancemethods. A workaround can be found at
# https://stackoverflow.com/a/29873604/8727928
if sys.version.startswith("3"):
# import joblib dependencies
from joblib import Parallel, delayed
# split the data into smaller parts
len_index = int(len(self.index_AB) / 2)
df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]]
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.string('lastname', 'lastname')
comp.exact('street', 'street')
# do in parallel
Parallel(n_jobs=2)(
delayed(comp.compute)(df_chunks[i], self.A, self.B)
for i in [0, 1])
def test_indexing_types(self):
# test the two types of indexing
# this test needs improvement
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B_reversed = B[::-1].copy()
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
comp_label = recordlinkage.Compare(indexing_type='label')
comp_label.exact('col', 'col')
result_label = comp_label.compute(ix, A, B_reversed)
# test with position indexing type
comp_position = recordlinkage.Compare(indexing_type='position')
comp_position.exact('col', 'col')
result_position = comp_position.compute(ix, A, B_reversed)
assert (result_position.values == 1).all(axis=0)
pdt.assert_frame_equal(result_label, result_position)
def test_pass_list_of_features(self):
from recordlinkage.compare import FrequencyA, VariableA, VariableB
# setup datasets and record pairs
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
features = [
VariableA('col', label='y1'),
VariableB('col', label='y2'),
FrequencyA('col', label='y3')
]
comp_label = recordlinkage.Compare(features=features)
result_label = comp_label.compute(ix, A, B)
assert list(result_label) == ["y1", "y2", "y3"]
class TestCompareFeatures(TestData):
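    """Test building features directly from BaseCompareFeature instances."""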
def test_feature(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1))
feature.compute(ix, A, B)
def test_feature_multicolumn_return(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def ones(s1, s2):
return DataFrame(np.ones((len(s1), 3)))
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = ones
result = feature.compute(ix, A, B)
assert result.shape == (5, 3)
def test_feature_multicolumn_input(self):
# test using classes and the base class
A = DataFrame({
'col1': ['abc', 'abc', 'abc', 'abc', 'abc'],
'col2': ['abc', 'abc', 'abc', 'abc', 'abc']
})
B = DataFrame({
'col1': ['abc', 'abd', 'abc', 'abc', '123'],
'col2': ['abc', 'abd', 'abc', 'abc', '123']
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2'])
feature._f_compare_vectorized = \
lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1))
feature.compute(ix, A, B)
class TestCompareExact(TestData):
"""Test the exact comparison method."""
def test_exact_str_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 0, 1, 1, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_exact_num_type(self):
A = DataFrame({'col': [42, 42, 41, 43, nan]})
B = DataFrame({'col': [42, 42, 42, 42, 42]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 1, 0, 0, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_link_exact_missing(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='na_')
comp.exact('col', 'col', missing_value=0, label='na_0')
comp.exact('col', 'col', missing_value=9, label='na_9')
comp.exact('col', 'col', missing_value=nan, label='na_na')
comp.exact('col', 'col', missing_value='str', label='na_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_')
pdt.assert_series_equal(result['na_'], expected)
# Missing values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_0')
pdt.assert_series_equal(result['na_0'], expected)
# Missing values as 9
expected = Series([1, 1, 0, 9, 9], index=ix, name='na_9')
pdt.assert_series_equal(result['na_9'], expected)
# Missing values as nan
expected = Series([1, 1, 0, nan, nan], index=ix, name='na_na')
pdt.assert_series_equal(result['na_na'], expected)
# Missing values as string
expected = Series([1, 1, 0, 'str', 'str'], index=ix, name='na_str')
pdt.assert_series_equal(result['na_str'], expected)
def test_link_exact_disagree(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='d_')
comp.exact('col', 'col', disagree_value=0, label='d_0')
comp.exact('col', 'col', disagree_value=9, label='d_9')
comp.exact('col', 'col', disagree_value=nan, label='d_na')
comp.exact('col', 'col', disagree_value='str', label='d_str')
result = comp.compute(ix, A, B)
# disagree values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_')
pdt.assert_series_equal(result['d_'], expected)
# disagree values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_0')
pdt.assert_series_equal(result['d_0'], expected)
# disagree values as 9
expected = Series([1, 1, 9, 0, 0], index=ix, name='d_9')
pdt.assert_series_equal(result['d_9'], expected)
# disagree values as nan
expected = Series([1, 1, nan, 0, 0], index=ix, name='d_na')
pdt.assert_series_equal(result['d_na'], expected)
# disagree values as string
expected = Series([1, 1, 'str', 0, 0], index=ix, name='d_str')
pdt.assert_series_equal(result['d_str'], expected)
# tests/test_compare.py:TestCompareNumeric
class TestCompareNumeric(TestData):
"""Test the numeric comparison methods."""
def test_numeric(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 2, 3, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', 'step', offset=2)
comp.numeric('col', 'col', method='step', offset=2)
comp.numeric('col', 'col', 'step', 2)
result = comp.compute(ix, A, B)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=1)
pdt.assert_series_equal(result[1], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
def test_numeric_with_missings(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', scale=2)
comp.numeric('col', 'col', scale=2, missing_value=0)
comp.numeric('col', 'col', scale=2, missing_value=123.45)
comp.numeric('col', 'col', scale=2, missing_value=nan)
comp.numeric('col', 'col', scale=2, missing_value='str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Missing values as 0
expected = Series(
[1.0, 1.0, 1.0, 0.0, 0.0], index=ix, dtype=np.float64, name=1)
pdt.assert_series_equal(result[1], expected)
# Missing values as 123.45
expected = Series([1.0, 1.0, 1.0, 123.45, 123.45], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
# Missing values as nan
expected = Series([1.0, 1.0, 1.0, nan, nan], index=ix, name=3)
pdt.assert_series_equal(result[3], expected)
# Missing values as string
expected = Series(
[1, 1, 1, 'str', 'str'], index=ix, dtype=object, name=4)
pdt.assert_series_equal(result[4], expected)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms(self, alg):
A = DataFrame({'col': [1, 1, 1, 1, 1]})
B = DataFrame({'col': [1, 2, 3, 4, 5]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='step', offset=1, label='step')
comp.numeric(
'col', 'col', method='linear', offset=1, scale=2, label='linear')
comp.numeric(
'col', 'col', method='squared', offset=1, scale=2, label='squared')
comp.numeric(
'col', 'col', method='exp', offset=1, scale=2, label='exp')
comp.numeric(
'col', 'col', method='gauss', offset=1, scale=2, label='gauss')
result_df = comp.compute(ix, A, B)
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
if alg != 'step':
print(alg)
print(result)
# sim(scale) = 0.5
expected_bool = Series(
[False, False, False, True, False], index=ix, name=alg)
pdt.assert_series_equal(result == 0.5, expected_bool)
# sim(offset) = 1
expected_bool = Series(
[True, True, False, False, False], index=ix, name=alg)
pdt.assert_series_equal(result == 1.0, expected_bool)
# sim(scale) larger than 0.5
expected_bool = Series(
[False, False, True, False, False], index=ix, name=alg)
pdt.assert_series_equal((result > 0.5) & (result < 1.0),
expected_bool)
# sim(scale) smaller than 0.5
expected_bool = Series(
[False, False, False, False, True], index=ix, name=alg)
pdt.assert_series_equal((result < 0.5) & (result >= 0.0),
expected_bool)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms_errors(self, alg):
# scale negative
if alg != "step":
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
# offset negative
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=-2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
def test_numeric_does_not_exist(self):
# raise when algorithm doesn't exists
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='unknown_algorithm')
pytest.raises(ValueError, comp.compute, ix, A, B)
# tests/test_compare.py:TestCompareDates
class TestCompareDates(TestData):
"""Test the exact comparison method."""
def test_dates(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col')
result = comp.compute(ix, A, B)[0]
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name=0)
pdt.assert_series_equal(result, expected)
def test_date_incorrect_dtype(self):
A = DataFrame({
'col':
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30']
})
B = DataFrame({
'col': [
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
A['col1'] = to_datetime(A['col'])
B['col1'] = to_datetime(B['col'])
comp = recordlinkage.Compare()
comp.date('col', 'col1')
pytest.raises(ValueError, comp.compute, ix, A, B)
comp = recordlinkage.Compare()
comp.date('col1', 'col')
pytest.raises(ValueError, comp.compute, ix, A, B)
def test_dates_with_missings(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='m_')
comp.date('col', 'col', missing_value=0, label='m_0')
comp.date('col', 'col', missing_value=123.45, label='m_float')
comp.date('col', 'col', missing_value=nan, label='m_na')
comp.date('col', 'col', missing_value='str', label='m_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_')
pdt.assert_series_equal(result['m_'], expected)
# Missing values as 0
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_0')
pdt.assert_series_equal(result['m_0'], expected)
# Missing values as 123.45
expected = Series([1, 123.45, 0, 0.5, 0.5], index=ix, name='m_float')
pdt.assert_series_equal(result['m_float'], expected)
# Missing values as nan
expected = Series([1, nan, 0, 0.5, 0.5], index=ix, name='m_na')
pdt.assert_series_equal(result['m_na'], expected)
# Missing values as string
expected = Series(
[1, 'str', 0, 0.5, 0.5], index=ix, dtype=object, name='m_str')
pdt.assert_series_equal(result['m_str'], expected)
def test_dates_with_swap(self):
months_to_swap = [(9, 10, 123.45), (10, 9, 123.45), (1, 2, 123.45),
(2, 1, 123.45)]
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='s_')
comp.date(
'col', 'col', swap_month_day=0, swap_months='default', label='s_1')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months='default',
label='s_2')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months=months_to_swap,
label='s_3')
comp.date(
'col',
'col',
swap_month_day=nan,
swap_months='default',
missing_value=nan,
label='s_4')
comp.date('col', 'col', swap_month_day='str', label='s_5')
result = comp.compute(ix, A, B)
# swap_month_day as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='s_')
pdt.assert_series_equal(result['s_'], expected)
# swap_month_day and swap_months as 0
expected = Series([1, 0, 0, 0, 0.5], index=ix, name='s_1')
pdt.assert_series_equal(result['s_1'], expected)
# swap_month_day 123.45 (float)
expected = Series([1, 0, 0, 123.45, 0.5], index=ix, name='s_2')
pdt.assert_series_equal(result['s_2'], expected)
# swap_month_day and swap_months 123.45 (float)
expected = Series([1, 0, 0, 123.45, 123.45], index=ix, name='s_3')
pdt.assert_series_equal(result['s_3'], expected)
# swap_month_day and swap_months as nan
expected = Series([1, nan, 0, nan, 0.5], index=ix, name='s_4')
pdt.assert_series_equal(result['s_4'], expected)
# swap_month_day as string
expected = Series(
[1, 0, 0, 'str', 0.5], index=ix, dtype=object, name='s_5')
pdt.assert_series_equal(result['s_5'], expected)
# tests/test_compare.py:TestCompareGeo
class TestCompareGeo(TestData):
"""Test the geo comparison method."""
def test_geo(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step',
offset=50) # 50 km range
result = comp.compute(ix, A, B)
# Missing values as default [36.639460, 54.765854, 44.092472]
expected = Series([1.0, 0.0, 1.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
def test_geo_batch(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step', offset=1, label='step')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='linear',
offset=1,
scale=2,
label='linear')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='squared',
offset=1,
scale=2,
label='squared')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='exp',
offset=1,
scale=2,
label='exp')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='gauss',
offset=1,
scale=2,
label='gauss')
result_df = comp.compute(ix, A, B)
print(result_df)
for alg in ['step', 'linear', 'squared', 'exp', 'gauss']:
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
def test_geo_does_not_exist(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo('lat', 'lng', 'lat', 'lng', method='unknown')
pytest.raises(ValueError, comp.compute, ix, A, B)
class TestCompareStrings(TestData):
"""Test the exact comparison method."""
def test_defaults(self):
        # the default algorithm is levenshtein
        # check that the default call is identical to an explicit levenshtein call
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', label='default')
comp.string('col', 'col', method='levenshtein', label='with_args')
result = comp.compute(ix, A, B)
pdt.assert_series_equal(
result['default'].rename(None),
result['with_args'].rename(None)
)
def test_fuzzy(self):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method='jaro', missing_value=0)
comp.string('col', 'col', method='q_gram', missing_value=0)
comp.string('col', 'col', method='cosine', missing_value=0)
comp.string('col', 'col', method='jaro_winkler', missing_value=0)
comp.string('col', 'col', method='dameraulevenshtein', missing_value=0)
comp.string('col', 'col', method='levenshtein', missing_value=0)
result = comp.compute(ix, A, B)
print(result)
assert result.notnull().all(1).all(0)
assert (result[result.notnull()] >= 0).all(1).all(0)
assert (result[result.notnull()] <= 1).all(1).all(0)
def test_threshold(self):
A = DataFrame({'col': [u"gretzky", u"gretzky99", u"gretzky", u"gretzky"]})
B = DataFrame({'col': [u"gretzky", u"gretzky", nan, u"wayne"]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.5,
missing_value=2.0,
label="x_col1"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=1.0,
missing_value=0.5,
label="x_col2"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.0,
missing_value=nan,
label="x_col3"
)
result = comp.compute(ix, A, B)
expected = Series([1.0, 1.0, 2.0, 0.0], index=ix, name="x_col1")
pdt.assert_series_equal(result["x_col1"], expected)
expected = Series([1.0, 0.0, 0.5, 0.0], index=ix, name="x_col2")
pdt.assert_series_equal(result["x_col2"], expected)
expected = Series([1.0, 1.0, nan, 1.0], index=ix, name="x_col3")
pdt.assert_series_equal(result["x_col3"], expected)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_incorrect_input(self, alg):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
        B = DataFrame({'col': [1, 1, 1, nan, nan]})
# -*- coding: utf-8 -*-
'''
General toolboxes
'''
import sys
import time
import inspect
import numpy as np
import pandas as pd
from functools import reduce, wraps
from random import randint, random, uniform
from dramkit.logtools.utils_logger import logger_show
from dramkit.speedup.multi_thread import SingleThread
PYTHON_VERSION = float(sys.version[:3])
class StructureObject(object):
'''类似于MATLAB结构数组,存放变量,便于直接赋值和查看'''
def __init__(self, dirt_modify=True, **kwargs):
'''初始化'''
self.set_dirt_modify(dirt_modify)
self.set_from_dict(kwargs)
@property
def dirt_modify(self):
return self.__dirt_modify
def set_dirt_modify(self, dirt_modify):
assert isinstance(dirt_modify, bool)
self.__dirt_modify = dirt_modify
def __setattr__(self, key, value):
_defaults = ['__dirt_modify', '__logger']
_defaults = ['_StructureObject' + x for x in _defaults]
if key in _defaults:
self.__dict__[key] = value
return
if self.dirt_modify:
self.__dict__[key] = value
else:
raise DirtModifyError(
'不允许直接赋值,请调用`set_key_value`或`set_from_dict`方法!')
def __repr__(self):
'''查看时以key: value格式打印'''
_defaults = ['__dirt_modify', '__logger']
_defaults = ['_StructureObject' + x for x in _defaults]
return ''.join('{}: {}\n'.format(k, v) for k, v in self.__dict__.items() \
if k not in _defaults)
def set_key_value(self, key, value):
self.__dict__[key] = value
def set_from_dict(self, d):
'''通过dict批量增加属性变量'''
assert isinstance(d, dict), '必须为dict格式'
self.__dict__.update(d)
def pop(self, key):
'''删除属性变量key,有返回'''
return self.__dict__.pop(key)
def remove(self, key):
'''删除属性变量key,无返回'''
del self.__dict__[key]
def clear(self):
'''清空所有属性变量'''
self.__dict__.clear()
class DirtModifyError(Exception):
pass
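# Illustrative usage sketch (added for clarity, not part of the original
# module): exercises the attribute-style access and the dirt_modify switch of
# StructureObject defined above; all values below are made up.
def _demo_structure_object():
    obj = StructureObject(a=1, b='x')        # attributes filled from kwargs
    obj.c = [1, 2, 3]                        # direct assignment allowed by default
    obj.set_from_dict({'d': 4})              # batch update from a dict
    print(obj)                               # prints "key: value" lines
    locked = StructureObject(dirt_modify=False)
    try:
        locked.e = 5                         # direct assignment is forbidden here
    except DirtModifyError:
        locked.set_key_value('e', 5)         # must go through the setter instead
    return obj, locked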
def run_func_with_timeout(func, args, timeout=10):
'''
| 限定时间(timeout秒)执行函数func,若限定时间内未执行完毕,返回None
| args为tuple或list,为func函数接受的参数列表
'''
task = SingleThread(func, args, False) # 创建线程
task.start() # 启动线程
task.join(timeout=timeout) # 最大执行时间
# 若超时后,线程依旧运行,则强制结束
if task.is_alive():
task.stop_thread()
return task.get_result()
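# Illustrative sketch (added): per the docstring above, a call that exceeds
# `timeout` should come back as None, while a fast call returns its result.
# The `_work` helper is made up for demonstration.
def _demo_run_func_with_timeout():
    def _work(seconds, value):
        time.sleep(seconds)
        return value
    fast = run_func_with_timeout(_work, (0.1, 'ok'), timeout=2)   # -> 'ok'
    slow = run_func_with_timeout(_work, (5, 'late'), timeout=1)   # -> None
    return fast, slow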
def get_func_arg_names(func):
'''获取函数func的参数名称列表'''
return inspect.getfullargspec(func).args
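# Illustrative sketch (added): argument names of a function defined above.
def _demo_get_func_arg_names():
    assert get_func_arg_names(run_func_with_timeout) == ['func', 'args', 'timeout']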
def try_repeat_run(n_max_run=3, logger=None, sleep_seconds=0,
force_rep=False):
'''
| 作为装饰器尝试多次运行指定函数
| 使用场景:第一次运行可能出错,需要再次运行(比如取数据时第一次可能连接超时,需要再取一次)
Parameters
----------
n_max_run : int
最多尝试运行次数
logger : None, logging.Logger
日志记录器
sleep_seconds : int, float
| 多次执行时,上一次执行完成之后需要暂停的时间(秒)
| 注:在force_rep为True时,每次执行完都会暂停,force_rep为False只有报错之后才会暂停
force_rep : bool
若为True,则不论是否报错都强制重复执行,若为False,则只有报错才会重复执行
Returns
-------
result : None, other
若目标函数执行成功,则返回执行结果;若失败,则返回None
Examples
--------
.. code-block:: python
:linenos:
from dramkit import simple_logger
logger = simple_logger()
@try_repeat_run(2, logger=logger, sleep_seconds=0, force_rep=False)
def rand_div(x):
return x / np.random.randint(-1, 1)
def repeat_test(info_):
print(info_)
return rand_div(0)
>>> a = repeat_test('repeat test...')
>>> print(a)
'''
def transfunc(func):
@wraps(func)
def repeater(*args, **kwargs):
'''尝试多次运行func'''
if not force_rep:
n_run, ok = 0, False
while not ok and n_run < n_max_run:
n_run += 1
# logger_show('第%s次运行`%s`...'%(n_run, func.__name__),
# logger, 'info')
try:
result = func(*args, **kwargs)
return result
except:
if n_run == n_max_run:
logger_show('`%s`运行失败,共运行了%s次。'%(func.__name__, n_run),
logger, 'error')
return
else:
if sleep_seconds > 0:
time.sleep(sleep_seconds)
else:
pass
else:
n_run = 0
while n_run < n_max_run:
n_run += 1
try:
result = func(*args, **kwargs)
logger_show('`%s`第%s运行:成功。'%(func.__name__, n_run),
logger, 'info')
except:
result = None
logger_show('`%s`第%s运行:失败。'%(func.__name__, n_run),
logger, 'error')
if sleep_seconds > 0:
time.sleep(sleep_seconds)
return result
return repeater
return transfunc
def log_used_time(logger=None):
'''
作为装饰器记录函数运行用时
Parameters
----------
logger : None, logging.Logger
日志记录器
Examples
--------
.. code-block:: python
:linenos:
from dramkit import simple_logger
logger = simple_logger()
@log_used_time(logger)
def wait():
print('wait...')
time.sleep(3)
>>> wait()
wait...
2021-12-28 12:39:54,013 -utils_logger.py[line: 32] -INFO:
--function `wait` run time: 3.000383s.
See Also
--------
:func:`dramkit.gentools.print_used_time`
References
----------
- https://www.cnblogs.com/xiuyou/p/11283512.html
- https://www.cnblogs.com/slysky/p/9777424.html
- https://www.cnblogs.com/zhzhang/p/11375574.html
- https://www.cnblogs.com/zhzhang/p/11375774.html
- https://blog.csdn.net/weixin_33711647/article/details/92549215
'''
def transfunc(func):
@wraps(func)
def timer(*args, **kwargs):
'''运行func并记录用时'''
t0 = time.time()
result = func(*args, **kwargs)
t = time.time()
logger_show('function `%s` run time: %ss.'%(func.__name__, round(t-t0, 6)),
logger, 'info')
return result
return timer
return transfunc
def print_used_time(func):
'''
作为装饰器打印函数运行用时
Parameters
----------
func : function
需要记录运行用时的函数
Examples
--------
.. code-block:: python
:linenos:
@print_used_time
def wait():
print('wait...')
time.sleep(3)
>>> wait()
wait...
function `wait` run time: 3.008314s.
See Also
--------
:func:`dramkit.gentools.log_used_time`
References
----------
- https://www.cnblogs.com/slysky/p/9777424.html
- https://www.cnblogs.com/zhzhang/p/11375574.html
'''
@wraps(func)
def timer(*args, **kwargs):
'''运行func并print用时'''
t0 = time.time()
result = func(*args, **kwargs)
t = time.time()
print('function `%s` run time: %ss.'%(func.__name__, round(t-t0, 6)))
return result
return timer
def get_update_kwargs(key, arg, kwargs, arg_default=None,
func_update=None):
'''
取出并更新kwargs中key参数的值
使用场景:当一个函数接受**kwargs参数,同时需要对kwargs里面的某个key的值进行更新并且
提取出来单独传参
Parameters
----------
key :
kwargs中待取出并更新的关键字
arg :
关键字key对应的新值
kwargs : dict
关键字参数对
arg_default : key关键词对应参数默认值
func_update : None, False, function
自定义取出的key参数值更新函数: arg_new = func_update(arg, arg_old)
- 若为False, 则不更新,直接取出原来key对应的参数值或默认值
- 若为`replace`, 则直接替换更新
- 若为None, 则 **默认** 更新方式为:
* 若参数值arg为dict或list类型,则增量更新
* 若参数值arg_old为list且arg不为list,则增量更新
* 其它情况直接替换更新
Returns
-------
arg_new :
取出并更新之后的关键字key对应参数值
kwargs :
删除key之后的kwargs
Examples
--------
>>> key, arg = 'a', 'aa'
>>> kwargs = {'a': 'a', 'b': 'b'}
>>> get_update_kwargs(key, arg, kwargs)
('aa', {'b': 'b'})
>>> key, arg = 'a', {'a_': 'aa__'}
>>> kwargs = {'a': {'a': 'aa'}, 'b': 'b'}
>>> get_update_kwargs(key, arg, kwargs)
({'a': 'aa', 'a_': 'aa__'}, {'b': 'b'})
>>> key, arg = 'a', ['aa', 'aa_']
>>> kwargs = {'a': ['a'], 'b': 'b'}
>>> get_update_kwargs(key, arg, kwargs)
(['a', 'aa', 'aa_'], {'b': 'b'})
>>> key, arg = 'a', ['aa', 'aa_']
>>> kwargs = {'a': ['a'], 'b': 'b'}
>>> get_update_kwargs(key, arg, kwargs, func_update='replace')
(['aa', 'aa_'], {'b': 'b'})
>>> key, arg = 'a', 'aa_'
>>> kwargs = {'a': ['a'], 'b': 'b'}
>>> get_update_kwargs(key, arg, kwargs)
(['a', 'aa_'], {'b': 'b'})
'''
def _default_update(arg, arg_old):
if isinstance(arg, dict):
assert isinstance(arg_old, dict) or isnull(arg_old)
arg_new = {} if isnull(arg_old) else arg_old
arg_new.update(arg)
elif isinstance(arg, list):
assert isinstance(arg_old, list) or isnull(arg_old)
arg_new = [] if isnull(arg_old) else arg_old
arg_new += arg
elif isinstance(arg_old, list) and not isinstance(arg, list):
arg_new = arg_old + [arg]
else:
arg_new = arg
return arg_new
# 取出原来的值
if key in kwargs.keys():
arg_old = kwargs[key]
del kwargs[key]
else:
arg_old = arg_default
# 不更新
if func_update is False:
return arg_old, kwargs
# 更新
if func_update is None:
func_update = _default_update
elif func_update == 'replace':
func_update = lambda arg, arg_old: arg
arg_new = func_update(arg, arg_old)
return arg_new, kwargs
def roulette_base(fitness):
'''
基本轮盘赌法
Parameters
----------
fitness : list
所有备选对象的fitness值列表
.. note::
fitness的元素值应为正,且fitness值越大,被选中概率越大
:returns: `int` - 返回被选中对象的索引号
References
----------
https://blog.csdn.net/armwangEric/article/details/50775206
'''
sum_fits = sum(fitness)
rand_point = uniform(0, sum_fits)
accumulator = 0.0
for idx, fitn in enumerate(fitness):
accumulator += fitn
if accumulator >= rand_point:
return idx
def roulette_stochastic_accept(fitness):
'''
轮盘赌法,随机接受法
Parameters
----------
fitness : list
所有备选对象的fitness值列表
.. note::
fitness的元素值应为正,且fitness值越大,被选中概率越大
:returns: `int` - 返回被选中对象的索引号
References
----------
https://blog.csdn.net/armwangEric/article/details/50775206
'''
n = len(fitness)
max_fit = max(fitness)
while True:
idx = randint(0, n-1)
if random() <= fitness[idx] / max_fit:
return idx
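# Illustrative sketch (added): with fitness weights 1:2:3 the last index should
# be drawn roughly half of the time; the draw is random, so only approximate.
def _demo_roulette():
    fitness = [1, 2, 3]
    draws = [roulette_stochastic_accept(fitness) for _ in range(6000)]
    share = draws.count(2) / len(draws)
    print('share of index 2: %.3f (expected ~0.5)' % share)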
def roulette_count(fitness, n=10000, rand_func=None):
'''
轮盘赌法n次模拟,返回每个备选对象在n次模拟中被选中的次数
Parameters
----------
fitness : list, dict
所有备选对象的fitness值列表或字典,格式参见Example
.. note::
fitness的元素值应为正,且fitness值越大,被选中概率越大
n : int
模拟次数
rand_func : None, function
| 指定轮盘赌法函数,如'roulette_base'(:func:`dramkit.gentools.roulette_base`)
| 或'roulette_stochastic_accept'(:func:`dramkit.gentools.roulette_stochastic_accept`),
| 默认用'roulette_stochastic_accept'
:returns: `list, dict` - 返回每个对象在模拟n次中被选中的次数
Examples
--------
>>> fitness = [1, 2, 3]
>>> roulette_count(fitness, n=6000)
[(0, 991), (1, 2022), (2, 2987)]
>>> fitness = (1, 2, 3)
>>> roulette_count(fitness, n=6000)
[(0, 1003), (1, 1991), (2, 3006)]
>>> fitness = [('a', 1), ('b', 2), ('c', 3)]
>>> roulette_count(fitness, n=6000)
[('a', 997), ('b', 1989), ('c', 3014)]
>>> fitness = [['a', 1], ['b', 2], ['c', 3]]
>>> roulette_count(fitness, n=6000)
[('a', 1033), ('b', 1967), ('c', 3000)]
>>> fitness = {'a': 1, 'b': 2, 'c': 3}
>>> roulette_count(fitness, n=6000)
{'a': 988, 'b': 1971, 'c': 3041}
'''
if rand_func is None:
rand_func = roulette_stochastic_accept
if isinstance(fitness, dict):
keys, vals = [], []
for k, v in fitness.items():
keys.append(k)
vals.append(v)
randpicks = [rand_func(vals) for _ in range(n)]
idx_picks = [(x, randpicks.count(x)) for x in range(len(vals))]
return {keys[x[0]]: x[1] for x in idx_picks}
elif (isinstance(fitness[0], list) or isinstance(fitness[0], tuple)):
keys, vals = [], []
for k, v in fitness:
keys.append(k)
vals.append(v)
randpicks = [rand_func(vals) for _ in range(n)]
idx_picks = [(x, randpicks.count(x)) for x in range(len(vals))]
return [(keys[x[0]], x[1]) for x in idx_picks]
elif (isinstance(fitness[0], int) or isinstance(fitness[0], float)):
randpicks = [rand_func(fitness) for _ in range(n)]
idx_picks = [(x, randpicks.count(x)) for x in range(len(fitness))]
return idx_picks
def rand_sum(target_sum, n, lowests, highests, isint=True, n_dot=6):
'''
在指定最大最小值范围内随机选取若干个随机数,所选取数之和为定值
Parameters
----------
target_sum : int, float
目标和
n : int
随机选取个数
    lowests : int, float, list
        随机数下限值,若为list,则其第k个元素对应第k个随机数的下限
    highests : int, float, list
        随机数上限值,若为list,则其第k个元素对应第k个随机数的上限
isint : bool
所选数是否强制为整数,若为False,则为实数
.. note::
若输入lowests或highests不是int,则isint为True无效
n_dot : int
动态上下界值与上下限比较时控制小数位数(为了避免python精度问题导致的报错)
:returns: `list` - 随机选取的n个数,其和为target_sum
Examples
--------
>>> rand_sum(100, 2, [20, 30], 100)
[65, 35]
>>> rand_sum(100, 2, 20, 100)
[41, 59]
>>> rand_sum(100, 2, [20, 10], [100, 90])
[73, 27]
'''
if not (isinstance(lowests, int) or isinstance(lowests, float)):
if len(lowests) != n:
raise ValueError('下限值列表(数组)lowests长度必须与n相等!')
if not (isinstance(highests, int) or isinstance(highests, float)):
if len(highests) != n:
raise ValueError('上限值列表(数组)highests长度必须与n相等!')
# lowests、highests组织成list
if isinstance(lowests, int) or isinstance(lowests, float):
lowests = [lowests] * n
if isinstance(highests, int) or isinstance(highests, float):
highests = [highests] * n
if any([isinstance(x, float) for x in lowests]) or any([isinstance(x, float) for x in highests]):
isint = False
LowHigh = list(zip(lowests, highests))
def _dyLowHigh(tgt_sum, low_high, n_dot=6):
'''
动态计算下界和上界
tgt_sum为目标和,low_high为上下界对组成的列表
n_dot为小数保留位数(为了避免python精度问题导致的报错)
'''
restSumHigh = sum([x[1] for x in low_high[1:]])
restSumLow = sum([x[0] for x in low_high[1:]])
low = max(tgt_sum-restSumHigh, low_high[0][0])
if round(low, n_dot) > low_high[0][1]:
raise ValueError(
'下界({})超过最大值上限({})!'.format(low, low_high[0][1]))
high = min(tgt_sum-restSumLow, low_high[0][1])
if round(high, n_dot) < low_high[0][0]:
raise ValueError(
'上界({})超过最小值下限({})!'.format(high, low_high[0][0]))
return low, high
S = 0
adds = []
low, high = _dyLowHigh(target_sum, LowHigh, n_dot=n_dot)
while len(adds) < n-1:
# 每次随机选择一个数
if isint:
randV = randint(low, high)
else:
randV = random() * (high-low) + low
# 判断当前所选择的备选数是否符合条件,若符合则加入备选数,
# 若不符合则删除所有备选数重头开始
restSum = target_sum - (S + randV)
restSumLow = sum([x[0] for x in LowHigh[len(adds)+1:]])
restSumHigh = sum([x[1] for x in LowHigh[len(adds)+1:]])
if restSumLow <= restSum <= restSumHigh:
S += randV
adds.append(randV)
low, high = _dyLowHigh(target_sum-S, LowHigh[len(adds):],
n_dot=n_dot)
else:
S = 0
adds = []
low, high = _dyLowHigh(target_sum, LowHigh, n_dot=n_dot)
adds.append(target_sum-sum(adds)) # 最后一个备选数
return adds
def rand_weight_sum(weight_sum, n, lowests, highests, weights=None, n_dot=6):
'''
在指定最大最小值范围内随机选取若干个随机数,所选取数之加权和为定值
Parameters
----------
weight_sum : float
目标加权和
n : int
随机选取个数
    lowests : int, float, list
        随机数下限值,若为list,则其第k个元素对应第k个随机数的下限
    highests : int, float, list
        随机数上限值,若为list,则其第k个元素对应第k个随机数的上限
weights : None, list
权重列表
.. note::
lowests和highests与weights应一一对应
n_dot : int
动态上下界值与上下限比较时控制小数位数(为了避免python精度问题导致的报错)
:returns: `list` - 随机选取的n个数,其以weights为权重的加权和为weight_sum
Examples
--------
>>> rand_weight_sum(60, 2, [20, 30], 100)
[21.41082008017613, 98.58917991982386]
>>> rand_weight_sum(70, 2, 20, 100)
[56.867261610484356, 83.13273838951565]
>>> rand_weight_sum(80, 2, [20, 10], [100, 90])
[80.32071140116187, 79.67928859883813]
>>> rand_weight_sum(80, 2, [20, 10], [100, 90], [0.6, 0.4])
[88.70409567475888, 66.94385648786168]
>>> rand_weight_sum(180, 2, [20, 10], [100, 90], [3, 2])
[23.080418085462018, 55.37937287180697]
'''
if weights is not None and len(weights) != n:
raise ValueError('权重列表W的长度必须等于n!')
if not (isinstance(lowests, int) or isinstance(lowests, float)):
if len(lowests) != n:
raise ValueError('下限值列表(数组)lowests长度必须与n相等!')
if not (isinstance(highests, int) or isinstance(highests, float)):
if len(highests) != n:
raise ValueError('上限值列表(数组)highests长度必须与n相等!')
# weights和lowests、highests组织成list
if weights is None:
weights = [1/n] * n
if isinstance(lowests, int) or isinstance(lowests, float):
lowests = [lowests] * n
if isinstance(highests, int) or isinstance(highests, float):
highests = [highests] * n
WLowHigh = list(zip(weights, lowests, highests))
def _dyLowHigh(wt_sum, w_low_high, n_dot=6):
'''
动态计算下界和上界
wt_sum为目标加权和,w_low_high为权重和上下界三元组组成的列表
n_dot为小数保留位数(为了避免python精度问题导致的报错)
'''
restSumHigh = sum([x[2]*x[0] for x in w_low_high[1:]])
restSumLow = sum([x[1]*x[0] for x in w_low_high[1:]])
low = max((wt_sum-restSumHigh) / w_low_high[0][0], w_low_high[0][1])
if round(low, n_dot) > w_low_high[0][2]:
raise ValueError(
'下界({})超过最大值上限({})!'.format(low, w_low_high[0][2]))
high = min((wt_sum-restSumLow) / w_low_high[0][0], w_low_high[0][2])
if round(high, n_dot) < w_low_high[0][1]:
raise ValueError(
'上界({})超过最小值下限({})!'.format(high, w_low_high[0][1]))
return low, high
S = 0
adds = []
low, high = _dyLowHigh(weight_sum, WLowHigh, n_dot=n_dot)
while len(adds) < n-1:
# 每次随机选择一个数
randV = random() * (high-low) + low
# 判断当前所选择的备选数是否符合条件,若符合则加入备选数,
# 若不符合则删除所有备选数重头开始
restSum = weight_sum - (S + randV * weights[len(adds)])
restSumLow = sum([x[1]*x[0] for x in WLowHigh[len(adds)+1:]])
restSumHigh = sum([x[2]*x[0] for x in WLowHigh[len(adds)+1:]])
if restSumLow <= restSum <= restSumHigh:
S += randV * weights[len(adds)]
adds.append(randV)
low, high = _dyLowHigh(weight_sum-S, WLowHigh[len(adds):],
n_dot=n_dot)
else:
S = 0
adds = []
low, high = _dyLowHigh(weight_sum, WLowHigh, n_dot=n_dot)
aw = zip(adds, weights[:-1])
adds.append((weight_sum-sum([a*w for a, w in aw])) / weights[-1])
return adds
def replace_repeat_iter(series, val, val0, gap=None, keep_last=False):
'''
替换序列中重复出现的值
| series (`pd.Series`) 中若步长为gap的范围内出现多个val值,则只保留第一条记录,
后面的替换为val0
| 若gap为None,则将连续出现的val值只保留第一个,其余替换为val0(这里连续出现val是指
不出现除了val和val0之外的其他值)
| 若keep_last为True,则连续的保留最后一个
返回结果为替换之后的series (`pd.Series`)
Examples
--------
>>> data = pd.DataFrame([0, 1, 1, 0, -1, -1, 2, -1, 1, 0, 1, 1, 1, 0, 0,
... -1, -1, 0, 0, 1], columns=['test'])
>>> data['test_rep'] = replace_repeat_iter(data['test'], 1, 0, gap=None)
>>> data
test test_rep
0 0 0
1 1 1
2 1 0
3 0 0
4 -1 -1
5 -1 -1
6 2 2
7 -1 -1
8 1 1
9 0 0
10 1 0
11 1 0
12 1 0
13 0 0
14 0 0
15 -1 -1
16 -1 -1
17 0 0
18 0 0
19 1 1
>>> series = pd.Series([-1, 1, -1, 0, 1, 0, 1, 1, -1])
>>> replace_repeat_iter(series, 1, 0, gap=5)
0 -1
1 1
2 -1
3 0
4 1
5 0
6 0
7 0
8 -1
'''
if not keep_last:
return _replace_repeat_iter(series, val, val0, gap=gap)
else:
series_ = series[::-1]
series_ = _replace_repeat_iter(series_, val, val0, gap=gap)
return series_[::-1]
def _replace_repeat_iter(series, val, val0, gap=None):
'''
TODO
----
改为不在df里面算(df.loc可能会比较慢?)
'''
col = series.name
df = pd.DataFrame({col: series})
if gap is not None and (gap > df.shape[0] or gap < 1):
raise ValueError('gap取值范围必须为1到df.shape[0]之间!')
gap = None if gap == 1 else gap
# 当series.index存在重复值时为避免报错,因此先重置index最后再还原
ori_index = df.index
df.index = range(0, df.shape[0])
k = 0
while k < df.shape[0]:
if df.loc[df.index[k], col] == val:
k1 = k + 1
if gap is None:
while k1 < df.shape[0] and \
df.loc[df.index[k1], col] in [val, val0]:
if df.loc[df.index[k1], col] == val:
df.loc[df.index[k1], col] = val0
k1 += 1
else:
while k1 < min(k+gap, df.shape[0]) and \
df.loc[df.index[k1], col] in [val, val0]:
if df.loc[df.index[k1], col] == val:
df.loc[df.index[k1], col] = val0
k1 += 1
k = k1
else:
k += 1
df.index = ori_index
return df[col]
def replace_repeat_pd(series, val, val0, keep_last=False):
'''
| 替换序列中重复出现的值, 仅保留第一个
|
| 函数功能,参数和意义同 :func:`dramkit.gentools.replace_repeat_iter`
| 区别在于计算时在pandas.DataFrame里面进行而不是采用迭代方式,同时取消了gap
参数(即连续出现的val值只保留第一个)
'''
if not keep_last:
return _replace_repeat_pd(series, val, val0)
else:
series_ = series[::-1]
series_ = _replace_repeat_pd(series_, val, val0)
return series_[::-1]
def _replace_repeat_pd(series, val, val0):
col = series.name
df = pd.DataFrame({col: series})
# 为了避免计算过程中临时产生的列名与原始列名混淆,对列重新命名
col_ori = col
col = 'series'
df.columns = [col]
# 当series.index存在重复值时为避免报错,因此先重置index最后再还原
ori_index = df.index
df.index = range(0, df.shape[0])
df['gap1'] = df[col].apply(lambda x: x not in [val, val0]).astype(int)
df['is_val'] = df[col].apply(lambda x: x == val).astype(int)
df['val_or_gap'] = df['gap1'] + df['is_val']
df['pre_gap'] = df[df['val_or_gap'] == 1]['gap1'].shift(1)
df['pre_gap'] = df['pre_gap'].fillna(method='ffill')
k = 0
while k < df.shape[0] and df.loc[df.index[k], 'is_val'] != 1:
k += 1
if k < df.shape[0]:
df.loc[df.index[k], 'pre_gap'] = 1
df['pre_gap'] = df['pre_gap'].fillna(0).astype(int)
df['keep1'] = (df['is_val'] + df['pre_gap']).map({0: 0, 1: 0, 2: 1})
df['to_rplc'] = (df['keep1'] + df['is_val']).map({2: 0, 1: 1, 0: 0})
df[col] = df[[col, 'to_rplc']].apply(lambda x:
val0 if x['to_rplc'] == 1 else x[col], axis=1)
df.rename(columns={col: col_ori}, inplace=True)
df.index = ori_index
return df[col_ori]
def replace_repeat_func_iter(series, func_val, func_val0,
gap=None, keep_last=False):
'''
| 替换序列中重复出现的值,功能与 :func:`dramkit.gentools.replace_repeat_iter`
类似,只不过把val和val0的值由直接指定换成了由指定函数生成
| ``func_val`` 函数用于判断连续条件,其返回值只能是True或False,
| ``func_val0`` 函数用于生成替换的新值。
| 即series中若步长为gap的范围内出现多个满足func_val函数为True的值,
则只保留第一条记录,后面的替换为函数func_val0的值。
| 若gap为None,则将连续出现的满足func_val函数为True的值只保留第一个,其余替换为函数
func_val0的值(这里连续出现是指不出现除了满足func_val为True和等于func_val0函数值
之外的其他值)
返回结果为替换之后的series (`pd.Series`)
Examples
--------
>>> data = pd.DataFrame({'y': [0, 1, 1, 0, -1, -1, 2, -1, 1, 0, 1,
... 1, 1, 0, 0, -1, -1, 0, 0, 1]})
>>> data['y_rep'] = replace_repeat_func_iter(
... data['y'], lambda x: x < 1, lambda x: 3, gap=None)
>>> data
y y_rep
0 0 0
1 1 1
2 1 1
3 0 0
4 -1 3
5 -1 3
6 2 2
7 -1 -1
8 1 1
9 0 0
10 1 1
11 1 1
12 1 1
13 0 0
14 0 3
15 -1 3
16 -1 3
17 0 3
18 0 3
19 1 1
'''
if not keep_last:
return _replace_repeat_func_iter(series, func_val, func_val0, gap=gap)
else:
series_ = series[::-1]
series_ = _replace_repeat_func_iter(series_, func_val, func_val0, gap=gap)
return series_[::-1]
def _replace_repeat_func_iter(series, func_val, func_val0, gap=None):
col = series.name
df = pd.DataFrame({col: series})
if gap is not None and (gap > df.shape[0] or gap < 1):
raise ValueError('gap取值范围必须为1到df.shape[0]之间!')
gap = None if gap == 1 else gap
# 当series.index存在重复值时为避免报错,因此先重置index最后再还原
ori_index = df.index
df.index = range(0, df.shape[0])
k = 0
while k < df.shape[0]:
if func_val(df.loc[df.index[k], col]):
k1 = k + 1
if gap is None:
while k1 < df.shape[0] and \
(func_val(df.loc[df.index[k1], col]) \
or df.loc[df.index[k1], col] == \
func_val0(df.loc[df.index[k1], col])):
if func_val(df.loc[df.index[k1], col]):
df.loc[df.index[k1], col] = \
func_val0(df.loc[df.index[k1], col])
k1 += 1
else:
while k1 < min(k+gap, df.shape[0]) and \
(func_val(df.loc[df.index[k1], col]) \
or df.loc[df.index[k1], col] == \
func_val0(df.loc[df.index[k1], col])):
if func_val(df.loc[df.index[k1], col]):
df.loc[df.index[k1], col] = \
func_val0(df.loc[df.index[k1], col])
k1 += 1
k = k1
else:
k += 1
df.index = ori_index
return df[col]
def replace_repeat_func_pd(series, func_val, func_val0, keep_last=False):
'''
替换序列中重复出现的值, 仅保留第一个
| 函数功能,参数和意义同 :func:`dramkit.gentools.replace_repeat_func_iter`
| 区别在于计算时在pandas.DataFrame里面进行而不是采用迭代方式
| 同时取消了gap参数(即连续出现的满足func_val为True的值只保留第一个)
'''
if not keep_last:
return _replace_repeat_func_pd(series, func_val, func_val0)
else:
series_ = series[::-1]
series_ = _replace_repeat_func_pd(series_, func_val, func_val0)
return series_[::-1]
def _replace_repeat_func_pd(series, func_val, func_val0):
col = series.name
df = pd.DataFrame({col: series})
# 为了避免计算过程中临时产生的列名与原始列名混淆,对列重新命名
col_ori = col
col = 'series'
df.columns = [col]
# 当series.index存在重复值时为避免报错,因此先重置index最后再还原
ori_index = df.index
df.index = range(0, df.shape[0])
df['gap1'] = df[col].apply(lambda x:
not func_val(x) and x != func_val0(x)).astype(int)
df['is_val'] = df[col].apply(lambda x: func_val(x)).astype(int)
df['val_or_gap'] = df['gap1'] + df['is_val']
df['pre_gap'] = df[df['val_or_gap'] == 1]['gap1'].shift(1)
df['pre_gap'] = df['pre_gap'].fillna(method='ffill')
k = 0
while k < df.shape[0] and df.loc[df.index[k], 'is_val'] != 1:
k += 1
if k < df.shape[0]:
df.loc[df.index[k], 'pre_gap'] = 1
df['pre_gap'] = df['pre_gap'].fillna(0).astype(int)
df['keep1'] = (df['is_val'] + df['pre_gap']).map({0: 0, 1: 0, 2: 1})
df['to_rplc'] = (df['keep1'] + df['is_val']).map({2: 0, 1: 1, 0: 0})
df[col] = df[[col, 'to_rplc']].apply(
lambda x: func_val0(x[col]) if x['to_rplc'] == 1 else x[col],
axis=1)
df.rename(columns={col: col_ori}, inplace=True)
df.index = ori_index
return df[col_ori]
def con_count(series, func_cond, via_pd=True):
'''
计算series(pd.Series)中连续满足func_cond函数指定的条件的记录数
Parameters
----------
series : pd.Series
目标序列
func_cond : function
指定条件的函数,func_cond(x)返回结果只能为True或False
via_pd : bool
若via_pd为False,则计算时使用循环迭代,否则在pandas.DataFrame里面进行计算
:returns: `pd.Series` - 返回连续计数结果
Examples
--------
>>> df = pd.DataFrame([0, 0, 1, 1, 0, 0, 1, 1, 1], columns=['series'])
>>> func_cond = lambda x: True if x == 1 else False
>>> df['count1'] = con_count(df['series'], func_cond, True)
>>> df
series count1
0 0 0
1 0 0
2 1 1
3 1 2
4 0 0
5 0 0
6 1 1
7 1 2
8 1 3
>>> df['count0'] = con_count(df['series'], lambda x: x != 1, False)
>>> df
series count1 count0
0 0 0 1
1 0 0 2
2 1 1 0
3 1 2 0
4 0 0 1
5 0 0 2
6 1 1 0
7 1 2 0
8 1 3 0
'''
col = 'series'
series.name = col
df = pd.DataFrame(series)
# 当series.index存在重复值时为避免报错,因此先重置index最后再还原
ori_index = df.index
df.index = range(0, df.shape[0])
if via_pd:
df['Fok'] = df[col].apply(lambda x: func_cond(x)).astype(int)
df['count'] = df['Fok'].cumsum()
df['tmp'] = df[df['Fok'] == 0]['count']
df['tmp'] = df['tmp'].fillna(method='ffill')
df['tmp'] = df['tmp'].fillna(0)
df['count'] = (df['count'] - df['tmp']).astype(int)
df.index = ori_index
return df['count']
else:
df['count'] = 0
k = 0
while k < df.shape[0]:
if func_cond(df.loc[df.index[k], col]):
count = 1
df.loc[df.index[k], 'count'] = count
k1 = k + 1
while k1 < df.shape[0] and func_cond(df.loc[df.index[k1], col]):
count += 1
df.loc[df.index[k1], 'count'] = count
k1 += 1
k = k1
else:
k += 1
df.index = ori_index
return df['count']
def con_count_ignore(series, func_cond, via_pd=True, func_ignore=None):
'''
在 :func:`dramkit.gentools.con_count` 的基础上增加连续性判断条件:
当series中的值满足func_ignore函数值为True时,不影响连续性判断(func_ignore
默认为 ``lambda x: isnull(x)``)
'''
if func_ignore is None:
func_ignore = lambda x: isnull(x)
df = pd.DataFrame({'v': series})
df['ignore'] = df['v'].apply(lambda x: func_ignore(x)).astype(int)
df['count'] = con_count(df[df['ignore'] == 0]['v'], func_cond, via_pd=via_pd)
df['count'] = df['count'].fillna(0)
df['count'] = df['count'].astype(int)
return df['count']
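# Illustrative sketch (added): NaN rows do not break a run of 1s, they are
# simply counted as 0 themselves.
def _demo_con_count_ignore():
    s = pd.Series([1, np.nan, 1, 0, 1])
    out = con_count_ignore(s, lambda x: x == 1)
    assert out.tolist() == [1, 0, 2, 0, 1]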
def get_preval_func_cond(data, col_val, col_cond, func_cond):
'''
| 获取上一个满足指定条件的行中col_val列的值,条件为:
| 该行中col_cond列的值x满足func_cond(x)为True (func_cond(x)返回结果只能为True或False)
| 返回结果为 `pd.Series`
Examples
--------
>>> data = pd.DataFrame({'x1': [0, 1, 1, 0, -1, -1, 2, -1, 1, 0, 1, 1, 1,
... 0, 0, -1, -1, 0, 0, 1],
... 'x2': [0, 1, 1, 0, -1, -1, 1, -1, 1, 0, 1, 1, 1,
... 0, 0, -1, -1, 0, 0, 1]})
>>> data['x1_pre'] = get_preval_func_cond(data, 'x1', 'x2', lambda x: x != 1)
>>> data
x1 x2 x1_pre
0 0 0 NaN
1 1 1 0.0
2 1 1 0.0
3 0 0 0.0
4 -1 -1 0.0
5 -1 -1 -1.0
6 2 1 -1.0
7 -1 -1 -1.0
8 1 1 -1.0
9 0 0 -1.0
10 1 1 0.0
11 1 1 0.0
12 1 1 0.0
13 0 0 0.0
14 0 0 0.0
15 -1 -1 0.0
16 -1 -1 -1.0
17 0 0 -1.0
18 0 0 0.0
19 1 1 0.0
'''
df = data[[col_val, col_cond]].copy()
# 为了避免计算过程中临时产生的列名与原始列名混淆,对列重新命名
col_val, col_cond = ['col_val', 'col_cond']
df.columns = [col_val, col_cond]
df['Fok'] = df[col_cond].apply(lambda x: func_cond(x)).astype(int)
df['val_pre'] = df[df['Fok'] == 1][col_val]
df['val_pre'] = df['val_pre'].shift(1).fillna(method='ffill')
return df['val_pre']
def gap_count(series, func_cond, via_pd=True):
'''
计算series (`pd.Series`)中当前行距离上一个满足 ``func_cond`` 函数指定条件记录的行数
func_cond为指定条件的函数,func_cond(x)返回结果只能为True或False,
若via_pd为False,则使用循环迭代,若via_pd为True,则在pandas.DataFrme内计算
返回结果为 `pd.Series`
Examples
--------
>>> df = pd.DataFrame([0, 1, 1, 0, 0, 1, 1, 1], columns=['series'])
>>> func_cond = lambda x: True if x == 1 else False
>>> df['gap1'] = gap_count(df['series'], func_cond, True)
>>> df
series gap1
0 0 0
1 1 0
2 1 1
3 0 1
4 0 2
5 1 3
6 1 1
7 1 1
>>> df['gap0'] = gap_count(df['series'], lambda x: x != 1, False)
>>> df
series gap1 gap0
0 0 0 0
1 1 0 1
2 1 1 2
3 0 1 3
4 0 2 1
5 1 3 1
6 1 1 2
7 1 1 3
'''
col = 'series'
series.name = col
df = pd.DataFrame(series)
# 当series.index存在重复值时为避免报错,因此先重置index最后再还原
ori_index = df.index
df.index = range(0, df.shape[0])
if via_pd:
df['idx'] = range(0, df.shape[0])
df['idx_pre'] = get_preval_func_cond(df, 'idx', col, func_cond)
df['gap'] = (df['idx'] - df['idx_pre']).fillna(0).astype(int)
df.index = ori_index
return df['gap']
else:
df['count'] = con_count(series, lambda x: not func_cond(x), via_pd=via_pd)
df['gap'] = df['count']
k0 = 0
while k0 < df.shape[0] and not func_cond(df.loc[df.index[k0], col]):
df.loc[df.index[k0], 'gap'] = 0
k0 += 1
for k1 in range(k0+1, df.shape[0]):
if func_cond(df.loc[df.index[k1], col]):
df.loc[df.index[k1], 'gap'] = \
df.loc[df.index[k1-1], 'count'] + 1
df.index = ori_index
return df['gap']
def count_between_gap(data, col_gap, col_count, func_gap, func_count,
count_now_gap=False, count_now=True, via_pd=True):
'''
计算data (`pandas.DataFrame`)中当前行与上一个满足 ``func_gap`` 函数为True的行之间,
满足 ``func_count`` 函数为True的记录数
| 函数func_gap作用于 ``col_gap`` 列,func_count作用于 ``col_count`` 列,
两者返回值均为True或False
| ``count_now_gap`` 设置满足func_gap的行是否参与计数,若为False,
则该行计数为0,若为True,则该行按照上一次计数的最后一次计数处理
.. todo::
增加count_now_gap的处理方式:
- 该行计数为0
- 该行按上一次计数的最后一次计数处理
- 该行按下一次计数的第一次计数处理
``count_now`` 设置当当前行满足func_count时,从当前行开始对其计数还是从下一行开始对其计数
.. note::
注:当前行若满足同时满足func_gap和func_count,对其计数的行不会为下一行
(即要么不计数,要么在当前行对其计数)
若via_pd为True,则调用 :func:`count_between_gap_pd` 实现,否则用 :func:`count_between_gap_iter`
返回结果为 `pd.Series`
Examples
--------
>>> data = pd.DataFrame({'to_gap': [0, 1, 1, 0, -1, -1, 2, -1, 1, 0, -1, 1,
... 1, 0, 0, -1, -1, 0, 0, 1],
... 'to_count': [0, 1, 1, 0, -1, -1, 1, -1, 1, 0, 1,
... 1, 1, 0, 0, -1, -1, 0, 0, 1]})
>>> data['gap_count'] = count_between_gap(data, 'to_gap', 'to_count',
... lambda x: x == -1, lambda x: x == 1,
... count_now_gap=False, count_now=False)
>>> data
to_gap to_count gap_count
0 0 0 0
1 1 1 0
2 1 1 0
3 0 0 0
4 -1 -1 0
5 -1 -1 0
6 2 1 0
7 -1 -1 0
8 1 1 0
9 0 0 1
10 -1 1 0
11 1 1 0
12 1 1 1
13 0 0 2
14 0 0 2
15 -1 -1 0
16 -1 -1 0
17 0 0 0
18 0 0 0
19 1 1 0
>>> data = pd.DataFrame({'to_gap': [0, 1, 1, 0, -1, -1, 2, -1, 1, 0, -1, 1,
1, 0, 0, -1, -1, 0, 0, 1, -1, -1],
'to_count': [0, 1, 1, 0, -1, -1, 1, -1, 1, 0, 1,
1, 1, 0, 0, -1, 1, 0, 1, 1, 1, 1]})
>>> data['gap_count'] = count_between_gap(data, 'to_gap', 'to_count',
lambda x: x == -1, lambda x: x == 1,
count_now_gap=False, count_now=True)
>>> data
to_gap to_count gap_count
0 0 0 0
1 1 1 0
2 1 1 0
3 0 0 0
4 -1 -1 0
5 -1 -1 0
6 2 1 1
7 -1 -1 0
8 1 1 1
9 0 0 1
10 -1 1 0
11 1 1 1
12 1 1 2
13 0 0 2
14 0 0 2
15 -1 -1 0
16 -1 1 0
17 0 0 0
18 0 1 1
19 1 1 2
20 -1 1 0
21 -1 1 0
>>> data = pd.DataFrame({'to_gap': [0, 1, 1, 0, -1, -1, 2, -1, 1, 0, -1, 1,
1, 0, 0, -1, -1, 0, 0, 1, -1, -1],
'to_count': [0, -1, -1, 0, -1, -1, 1, -1, 1, 0, 1, 1,
1, 0, 0, -1, -1, 0, -1, 1, 1, 1]})
>>> data['gap_count'] = count_between_gap(data, 'to_gap', 'to_count',
lambda x: x == -1, lambda x: x == 1,
count_now_gap=True, count_now=False)
>>> data
to_gap to_count gap_count
0 0 0 0
1 1 -1 0
2 1 -1 0
3 0 0 0
4 -1 -1 0
5 -1 -1 0
6 2 1 0
7 -1 -1 1
8 1 1 0
9 0 0 1
10 -1 1 1
11 1 1 0
12 1 1 1
13 0 0 2
14 0 0 2
15 -1 -1 2
16 -1 -1 0
17 0 0 0
18 0 -1 0
19 1 1 0
20 -1 1 1
21 -1 1 0
>>> data = pd.DataFrame({'to_gap': [0, 1, 1, 0, -1, -1, 2, -1, 1, 0, -1, 1,
1, 0, 0, -1, -1, 0, 0, 1, -1, -1],
'to_count': [0, -1, -1, 0, -1, -1, 1, -1, 1, 0, 1, 1,
1, 0, 0, -1, -1, 0, -1, 1, 1, 1]})
>>> data['gap_count'] = count_between_gap(data, 'to_gap', 'to_count',
lambda x: x == -1, lambda x: x == 1,
count_now_gap=True, count_now=True)
>>> data
to_gap to_count gap_count
0 0 0 0
1 1 -1 0
2 1 -1 0
3 0 0 0
4 -1 -1 0
5 -1 -1 0
6 2 1 1
7 -1 -1 1
8 1 1 1
9 0 0 1
10 -1 1 2
11 1 1 1
12 1 1 2
13 0 0 2
14 0 0 2
15 -1 -1 2
16 -1 -1 0
17 0 0 0
18 0 -1 0
19 1 1 1
20 -1 1 2
21 -1 1 1
'''
if via_pd:
return count_between_gap_pd(data, col_gap, col_count, func_gap,
func_count, count_now_gap=count_now_gap,
count_now=count_now)
else:
return count_between_gap_iter(data, col_gap, col_count, func_gap,
func_count, count_now_gap=count_now_gap,
count_now=count_now)
def count_between_gap_pd(data, col_gap, col_count, func_gap, func_count,
count_now_gap=True, count_now=True):
'''参数和功能说明见 :func:`dramkit.gentools.count_between_gap` 函数'''
df = data[[col_gap, col_count]].copy()
# 为了避免计算过程中临时产生的列名与原始列名混淆,对列重新命名
col_gap, col_count = ['col_gap', 'col_count']
df.columns = [col_gap, col_count]
df['gap0'] = df[col_gap].apply(lambda x: not func_gap(x)).astype(int)
df['count1'] = df[col_count].apply(lambda x: func_count(x)).astype(int)
df['gap_count'] = df[df['gap0'] == 1]['count1'].cumsum()
df['gap_cut'] = df['gap0'].diff().shift(-1)
df['gap_cut'] = df['gap_cut'].apply(lambda x: 1 if x == -1 else np.nan)
df['tmp'] = (df['gap_count'] * df['gap_cut']).shift(1)
df['tmp'] = df['tmp'].fillna(method='ffill')
df['gap_count'] = df['gap_count'] - df['tmp']
if count_now_gap:
df['pre_gap0'] = df['gap0'].shift(1)
df['tmp'] = df['gap_count'].shift()
df['tmp'] = df[df['gap0'] == 0]['tmp']
df['gap_count1'] = df['gap_count'].fillna(0)
df['gap_count2'] = df['tmp'].fillna(0) + df['count1'] * (1-df['gap0'])
df['gap_count'] = df['gap_count1'] + df['gap_count2']
if not count_now:
df['gap_count'] = df['gap_count'].shift(1)
if not count_now_gap:
df['gap_count'] = df['gap0'] * df['gap_count']
else:
df['gap_count'] = df['pre_gap0'] * df['gap_count']
df['gap_count'] = df['gap_count'].fillna(0).astype(int)
return df['gap_count']
def count_between_gap_iter(data, col_gap, col_count, func_gap, func_count,
count_now_gap=True, count_now=True):
'''参数和功能说明见 :func:`dramkit.gentools.count_between_gap` 函数'''
df = data[[col_gap, col_count]].copy()
# 为了避免计算过程中临时产生的列名与原始列名混淆,对列重新命名
col_gap, col_count = ['col_gap', 'col_count']
df.columns = [col_gap, col_count]
# 当data.index存在重复值时为避免报错,因此先重置index最后再还原
ori_index = df.index
df.index = range(0, df.shape[0])
df['gap_count'] = 0
k = 0
while k < df.shape[0]:
if func_gap(df.loc[df.index[k], col_gap]):
k += 1
gap_count = 0
while k < df.shape[0] and \
not func_gap(df.loc[df.index[k], col_gap]):
if func_count(df.loc[df.index[k], col_count]):
gap_count += 1
df.loc[df.index[k], 'gap_count'] = gap_count
k += 1
else:
k += 1
if count_now_gap:
k = 1
while k < df.shape[0]:
if func_gap(df.loc[df.index[k], col_gap]):
if not func_gap(df.loc[df.index[k-1], col_gap]):
if func_count(df.loc[df.index[k], col_count]):
df.loc[df.index[k], 'gap_count'] = \
df.loc[df.index[k-1], 'gap_count'] + 1
k += 1
else:
df.loc[df.index[k], 'gap_count'] = \
df.loc[df.index[k-1], 'gap_count']
k += 1
else:
if func_count(df.loc[df.index[k], col_count]):
df.loc[df.index[k], 'gap_count'] = 1
k += 1
else:
k += 1
else:
k += 1
if not count_now:
df['gap_count_pre'] = df['gap_count'].copy()
if not count_now_gap:
for k in range(1, df.shape[0]):
if func_gap(df.loc[df.index[k], col_gap]):
df.loc[df.index[k], 'gap_count'] = 0
else:
df.loc[df.index[k], 'gap_count'] = \
df.loc[df.index[k-1], 'gap_count_pre']
else:
for k in range(1, df.shape[0]):
if func_gap(df.loc[df.index[k-1], col_gap]):
df.loc[df.index[k], 'gap_count'] = 0
else:
df.loc[df.index[k], 'gap_count'] = \
df.loc[df.index[k-1], 'gap_count_pre']
df.drop('gap_count_pre', axis=1, inplace=True)
k0 = 0
while k0 < df.shape[0] and not func_gap(df.loc[df.index[k0], col_gap]):
df.loc[df.index[k0], 'gap_count'] = 0
k0 += 1
    if k0 < df.shape[0]:
        df.loc[df.index[k0], 'gap_count'] = 0
df.index = ori_index
return df['gap_count']
def val_gap_cond(data, col_val, col_cond, func_cond, func_val,
to_cal_col=None, func_to_cal=None, val_nan=np.nan,
contain_1st=False):
'''
计算data (`pandas.DataFrame`)中从上一个 ``col_cond`` 列满足 ``func_cond`` 函数的行
到当前行, ``col_val`` 列记录的 ``func_val`` 函数值
| func_cond作用于col_cond列,func_cond(x)返回True或False,x为单个值
| func_val函数作用于col_val列,func_val(x)返回单个值,x为np.array或pd.Series或列表等
| func_to_cal作用于to_cal_col列,只有当前行func_to_cal值为True时才进行func_val计算,
否则返回结果中当前行值设置为val_nan
| contain_1st设置func_val函数计算时是否将上一个满足func_cond的行也纳入计算
.. todo::
参考 :func:`dramkit.gentools.count_between_gap` 的设置:
- 设置col_cond列满足func_cond函数的行,其参与func_val函数的前一次计算还是下一次计算还是不参与计算
Examples
--------
>>> data = pd.DataFrame({'val': [1, 2, 5, 3, 1, 7 ,9],
... 'sig': [1, 1, -1, 1, 1, -1, 1]})
>>> data['val_pre1'] = val_gap_cond(data, 'val', 'sig',
... lambda x: x == -1, lambda x: max(x))
>>> data
val sig val_pre1
0 1 1 NaN
1 2 1 NaN
2 5 -1 NaN
3 3 1 3.0
4 1 1 3.0
5 7 -1 7.0
6 9 1 9.0
'''
if to_cal_col is None and func_to_cal is None:
df = data[[col_val, col_cond]].copy()
# 为了避免计算过程中临时产生的列名与原始列名混淆,对列重新命名
col_val, col_cond = ['col_val', 'col_cond']
df.columns = [col_val, col_cond]
elif to_cal_col is not None and func_to_cal is not None:
df = data[[col_val, col_cond, to_cal_col]].copy()
# 为了避免计算过程中临时产生的列名与原始列名混淆,对列重新命名
col_val, col_cond, to_cal_col = ['col_val', 'col_cond',
'to_cal_col']
df.columns = [col_val, col_cond, to_cal_col]
df['idx'] = range(0, df.shape[0])
df['pre_idx'] = get_preval_func_cond(df, 'idx', col_cond, func_cond)
if to_cal_col is None and func_to_cal is None:
if not contain_1st:
df['gap_val'] = df[['pre_idx', 'idx', col_val]].apply(lambda x:
func_val(df[col_val].iloc[int(x['pre_idx']+1): int(x['idx']+1)]) \
if not isnull(x['pre_idx']) else val_nan, axis=1)
else:
df['gap_val'] = df[['pre_idx', 'idx', col_val]].apply(lambda x:
func_val(df[col_val].iloc[int(x['pre_idx']): int(x['idx']+1)]) \
if not isnull(x['pre_idx']) else val_nan, axis=1)
elif to_cal_col is not None and func_to_cal is not None:
if not contain_1st:
df['gap_val'] = df[['pre_idx', 'idx', col_val,
to_cal_col]].apply(lambda x:
func_val(df[col_val].iloc[int(x['pre_idx']+1): int(x['idx']+1)]) \
if not isnull(x['pre_idx']) and func_to_cal(x[to_cal_col]) else \
val_nan, axis=1)
else:
df['gap_val'] = df[['pre_idx', 'idx', col_val,
to_cal_col]].apply(lambda x:
func_val(df[col_val].iloc[int(x['pre_idx']): int(x['idx']+1)]) \
if not isnull(x['pre_idx']) and func_to_cal(x[to_cal_col]) else \
val_nan, axis=1)
return df['gap_val']
def filter_by_func_prenext(l, func_prenext):
'''
对 ``l`` (`list`)进行过滤,过滤后返回的 ``lnew`` (`list`)任意前后相邻两个元素满足:
func_prenext(lnew[i], lnew[i+1]) = True
过滤过程为:将 ``l`` 的第一个元素作为起点,找到其后第一个满足 ``func_prenext`` 函数
值为True的元素,再以该元素为起点往后寻找...
Examples
--------
>>> l = [1, 2, 3, 4, 1, 1, 2, 3, 6]
>>> func_prenext = lambda x, y: (y-x) >= 2
>>> filter_by_func_prenext(l, func_prenext)
[1, 3, 6]
>>> l = [1, 2, 3, 4, 1, 5, 1, 2, 3, 6]
>>> filter_by_func_prenext(l, func_prenext)
[1, 3, 5]
>>> filter_by_func_prenext(l, lambda x, y: y == x+1)
[1, 2, 3, 4]
>>> l = [(1, 2), (2, 3), (4, 1), (5, 0)]
>>> func_prenext = lambda x, y: abs(y[-1]-x[-1]) == 1
>>> filter_by_func_prenext(l, func_prenext)
[(1, 2), (2, 3)]
'''
if len(l) == 0:
return l
lnew = [l[0]]
idx_pre, idx_post = 0, 1
while idx_post < len(l):
vpre = l[idx_pre]
idx_post = idx_pre + 1
while idx_post < len(l):
vpost = l[idx_post]
if not func_prenext(vpre, vpost):
idx_post += 1
else:
lnew.append(vpost)
idx_pre = idx_post
break
return lnew
def filter_by_func_prenext_series(series, func_prenext,
func_ignore=None, val_nan=np.nan):
'''
对series (`pandas.Series`)调用 ``filter_by_func_prenext`` 函数进行过滤,
其中满足 ``func_ignore`` 函数为True的值不参与过滤,func_ignore函数默认为:
``lambda x: isnull(x)``
series中 **被过滤的值** 在返回结果中用 ``val_nan`` 替换, **不参与过滤** 的值保持不变
See Also
--------
:func:`dramkit.gentools.filter_by_func_prenext`
Examples
--------
>>> series = pd.Series([1, 2, 3, 4, 1, 1, 2, 3, 6])
>>> func_prenext = lambda x, y: (y-x) >= 2
>>> filter_by_func_prenext_series(series, func_prenext)
0 1.0
1 NaN
2 3.0
3 NaN
4 NaN
5 NaN
6 NaN
7 NaN
8 6.0
>>> series = pd.Series([1, 2, 0, 3, 0, 4, 0, 1, 0, 0, 1, 2, 3, 6],
... index=range(14, 0, -1))
>>> filter_by_func_prenext_series(series, func_prenext, lambda x: x == 0)
14 1.0
13 NaN
12 0.0
11 3.0
10 0.0
9 NaN
8 0.0
7 NaN
6 0.0
5 0.0
4 NaN
3 NaN
2 NaN
1 6.0
'''
if func_ignore is None:
func_ignore = lambda x: isnull(x)
l = [[k, series.iloc[k]] for k in range(0, len(series)) \
if not func_ignore(series.iloc[k])]
lnew = filter_by_func_prenext(l, lambda x, y: func_prenext(x[1], y[1]))
i_l = [k for k, v in l]
i_lnew = [k for k, v in lnew]
idxs_ignore = [_ for _ in i_l if _ not in i_lnew]
seriesNew = series.copy()
for k in idxs_ignore:
seriesNew.iloc[k] = val_nan
return seriesNew
def merge_df(df_left, df_right, same_keep='left', **kwargs):
'''
在 ``pd.merge`` 上改进,相同列名时自动去除重复的
Parameters
----------
df_left : pandas.DataFrame
待merge左表
df_right : pandas.DataFrame
待merge右表
same_keep : str
可选'left', 'right',设置相同列保留左边df还是右边df
**kwargs :
pd.merge接受的其他参数
:returns: `pandas.DataFrame` - 返回merge之后的数据表
'''
same_cols = [x for x in df_left.columns if x in df_right.columns]
if len(same_cols) > 0:
if 'on' in kwargs:
if isinstance(kwargs['on'], list):
same_cols = [x for x in same_cols if x not in kwargs['on']]
elif isinstance(kwargs['on'], str):
same_cols = [x for x in same_cols if x != kwargs['on']]
else:
raise ValueError('on参数只接受list或str!')
if same_keep == 'left':
df_right = df_right.drop(same_cols, axis=1)
elif same_keep == 'right':
df_left = df_left.drop(same_cols, axis=1)
else:
raise ValueError('same_keep参数只接受`left`或`right`!')
return pd.merge(df_left, df_right, **kwargs)
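# Illustrative sketch (added): the shared non-key column 'v' is kept from the
# left frame only, instead of coming back twice with _x/_y suffixes.
def _demo_merge_df():
    left = pd.DataFrame({'k': [1, 2], 'v': ['a', 'b']})
    right = pd.DataFrame({'k': [1, 2], 'v': ['A', 'B'], 'w': [10, 20]})
    out = merge_df(left, right, same_keep='left', on='k', how='left')
    assert list(out.columns) == ['k', 'v', 'w']
    assert out['v'].tolist() == ['a', 'b']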
def cut_df_by_con_val(df, by_col, func_eq=None):
'''
根据 `by_col` 列的值,将 `df (pandas.DataFrame)` 切分为多个子集列表,返回 `list`
切分依据:``func_eq`` 函数作用于 ``by_col`` 列,函数值连续相等的记录被划分到一个子集中
Examples
--------
>>> df = pd.DataFrame({'val': range(0,10),
... 'by_col': ['a']*3+['b']*2+['c']*1+['a']*3+['d']*1})
>>> df.index = ['z', 'y', 'x', 'w', 'v', 'u', 't', 's', 'r', 'q']
>>> cut_df_by_con_val(df, 'by_col')
[ val by_col
z 0 a
y 1 a
x 2 a,
val by_col
w 3 b
v 4 b,
val by_col
u 5 c,
val by_col
t 6 a
s 7 a
r 8 a,
val by_col
q 9 d]
'''
if isnull(func_eq):
func_eq = lambda x: x
df = df.copy()
df['val_func_eq'] = df[by_col].apply(func_eq)
by_col = 'val_func_eq'
sub_dfs= []
k = 0
while k < df.shape[0]:
k1 = k + 1
while k1 < df.shape[0] and df[by_col].iloc[k1] == df[by_col].iloc[k]:
k1 += 1
sub_dfs.append(df.iloc[k:k1, :].drop(by_col, axis=1))
k = k1
return sub_dfs
def get_con_start_end(series, func_con):
'''
找出series (`pandas.Series`)中值连续满足 ``func_con`` 函数值为True的分段起止位置,
返回起止位置对列表
Examples
--------
>>> series = pd.Series([0, 1, 1, 0, 1, 1, 0, -1, -1, 0, 0, -1, 1, 1, 1, 1, 0, -1])
>>> start_ends = get_con_start_end(series, lambda x: x == -1)
>>> start_ends
[[7, 8], [11, 11], [17, 17]]
>>> start_ends = get_con_start_end(series, lambda x: x == 1)
>>> start_ends
[[1, 2], [4, 5], [12, 15]]
'''
start_ends = []
# df['start'] = 0
# df['end'] = 0
start = 0
N = len(series)
while start < N:
if func_con(series.iloc[start]):
end = start
while end < N and func_con(series.iloc[end]):
end += 1
start_ends.append([start, end-1])
# df.loc[df.index[start], 'start'] = 1
# df.loc[df.index[end-1], 'end'] = 1
start = end + 1
else:
start += 1
return start_ends
def cut_range_to_subs(n, gap):
'''
    将 ``range(0, n)`` 切分成连续相接的 (起点, 终点) 子区间(返回元组列表):
    ``[(0, gap), (gap, 2*gap), ...]``
'''
n_ = n // gap
mod = n % gap
if mod != 0:
return [(k*gap, (k+1)*gap) for k in range(0, n_)] + [(gap * n_, n)]
else:
return [(k*gap, (k+1)*gap) for k in range(0, n_)]
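# Illustrative sketch (added): the last pair is shorter when gap does not
# divide n exactly.
def _demo_cut_range_to_subs():
    assert cut_range_to_subs(10, 4) == [(0, 4), (4, 8), (8, 10)]
    assert cut_range_to_subs(8, 4) == [(0, 4), (4, 8)]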
def check_l_allin_l0(l, l0):
'''
判断 ``l (list)`` 中的值是否都是 ``l0 (list)`` 中的元素, 返回True或False
Examples
--------
>>> l = [1, 2, 3, -1, 0]
>>> l0 = [0, 1, -1]
>>> check_l_allin_l0(l, l0)
False
>>> l = [1, 1, 0, -1, -1, 0, 0]
>>> l0 = [0, 1, -1]
    >>> check_l_allin_l0(l, l0)
True
'''
l_ = set(l)
l0_ = set(l0)
return len(l_-l0_) == 0
def check_exist_data(df, x_list, cols=None):
'''
依据指定的 ``cols`` 列检查 ``df (pandas.DataFrame)`` 中是否已经存在 ``x_list (list)`` 中的记录,
返回list,每个元素值为True或False
Examples
--------
>>> df = pd.DataFrame([['1', 2, 3.1, ], ['3', 4, 5.1], ['5', 6, 7.1]],
... columns=['a', 'b', 'c'])
>>> x_list, cols = [[3, 4], ['3', 4]], ['a', 'b']
>>> check_exist_data(df, x_list, cols=cols)
[False, True]
>>> check_exist_data(df, [['1', 3.1], ['3', 5.1]], ['a', 'c'])
[True, True]
'''
if not isnull(cols):
df_ = df.reindex(columns=cols)
else:
df_ = df.copy()
data = df_.to_dict('split')['data']
return [x in data for x in x_list]
def isnull(x):
'''判断x是否为无效值(None, nan, x != x),若是无效值,返回True,否则返回False'''
if x is None:
return True
if x is np.nan:
return True
try:
if x != x:
return True
except:
pass
return False
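# Illustrative sketch (added): the values treated as invalid versus ordinary
# falsy values, which are kept.
def _demo_isnull():
    assert isnull(None) and isnull(np.nan) and isnull(float('nan'))
    assert not isnull(0) and not isnull('') and not isnull('nan')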
def x_div_y(x, y, v_x0=None, v_y0=0, v_xy0=1):
'''
x除以y
- v_xy0为当x和y同时为0时的返回值
- v_y0为当y等于0时的返回值
- v_x0为当x等于0时的返回值
'''
if x == 0 and y == 0:
return v_xy0
if x != 0 and y == 0:
return v_y0
if x == 0 and y != 0:
return 0 if v_x0 is None else v_x0
return x / y
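# Illustrative sketch (added): the three zero special cases of x_div_y.
def _demo_x_div_y():
    assert x_div_y(0, 0) == 1      # both zero -> v_xy0
    assert x_div_y(3, 0) == 0      # denominator zero -> v_y0
    assert x_div_y(0, 5) == 0      # numerator zero -> 0 (v_x0 left as None)
    assert x_div_y(3, 4) == 0.75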
def power(a, b, return_real=True):
'''计算a的b次方,return_real设置是否只返回实属部分'''
c = a ** b
if isnull(c):
c = complex(a) ** complex(b)
if return_real:
c = c.real
return c
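# Illustrative sketch (added): an ordinary power, and a negative base with a
# fractional exponent, where only the real part of the principal root is kept.
def _demo_power():
    assert power(2, 10) == 1024
    assert abs(power(-8, 1 / 3) - 1.0) < 1e-6   # real part of 2*exp(i*pi/3)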
def cal_pct(v0, v1, vv00=1, vv10=-1):
'''
计算从v0到v1的百分比变化
- vv00为当v0的值为0且v1为正时的返回值,v1为负时取负号
- vv10为当v1的值为0且v0为正时的返回值,v0为负时取负号
'''
if isnull(v0) or isnull(v1):
return np.nan
if v0 == 0:
if v1 == 0:
return 0
elif v1 > 0:
return vv00
elif v1 < 0:
return -vv00
elif v1 == 0:
if v0 > 0:
return vv10
elif v0 < 0:
return -vv10
elif v0 > 0 and v1 > 0:
return v1 / v0 - 1
elif v0 < 0 and v1 < 0:
return -(v1 / v0 - 1)
elif v0 > 0 and v1 < 0:
return v1 / v0 - 1
elif v0 < 0 and v1 > 0:
return -(v1 / v0 - 1)
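# Illustrative sketch (added): sign handling of cal_pct plus the vv00/vv10
# placeholders used when one side is exactly zero.
def _demo_cal_pct():
    assert abs(cal_pct(10, 12) - 0.2) < 1e-12
    assert abs(cal_pct(-10, -12) + 0.2) < 1e-12   # further from zero -> -20%
    assert cal_pct(0, 5) == 1     # v0 == 0 -> vv00
    assert cal_pct(10, 0) == -1   # v1 == 0 -> vv10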
def min_com_multer(l):
'''求一列数 `l (list)` 的最小公倍数,支持负数和浮点数'''
l_max = max(l)
mcm = l_max
while any([mcm % x != 0 for x in l]):
mcm += l_max
return mcm
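# Illustrative sketch (added): least common multiple found by stepping through
# multiples of the largest element.
def _demo_min_com_multer():
    assert min_com_multer([4, 6]) == 12
    assert min_com_multer([3, 5, 10]) == 30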
def max_com_divisor(l):
'''
求一列数 `l (list)` 的最大公约数,只支持正整数
.. note::
只支持正整数
'''
def _isint(x):
'''判断x是否为整数'''
tmp = str(x).split('.')
if len(tmp) == 1 or all([x == '0' for x in tmp[1]]):
return True
return False
if any([x < 1 or not _isint(x) for x in l]):
raise ValueError('只支持正整数!')
l_min = min(l)
mcd = l_min
while any([x % mcd != 0 for x in l]):
mcd -= 1
return mcd
def mcd2_tad(a, b):
'''
辗转相除法求a和b的最大公约数,a、b为正数
.. note::
- a, b应为正数
- a, b为小数时由于精度问题会不正确
'''
if a < b:
a, b = b, a # a存放较大值,b存放较小值
if a % b == 0:
return b
else:
return mcd2_tad(b, a % b)
def max_com_divisor_tad(l):
'''
用辗转相除法求一列数 `l (list)` 的最大公约数, `l` 元素均为正数
.. note::
- l元素均为正数
- l元素为小数时由于精度问题会不正确
References
----------
https://blog.csdn.net/weixin_45069761/article/details/107954905
'''
# g = l[0]
# for i in range(1, len(l)):
# g = mcd2_tad(g, l[i])
# return g
return reduce(lambda x, y: mcd2_tad(x, y), l)
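# Illustrative usage sketch for max_com_divisor_tad; example values are assumptions:
# >>> max_com_divisor_tad([12, 18, 30])   # -> 6
# >>> max_com_divisor_tad([14, 21])       # -> 7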
def get_appear_order(series, ascending=True):
'''
    For each element of series (`pandas.Series`, discrete values), label which occurrence of that value it is;
    returns a `pandas.Series`. ascending controls whether occurrences are numbered in ascending order of appearance.
Examples
--------
>>> df = pd.DataFrame({'v': ['A', 'B', 'A', 'A', 'C', 'C']})
>>> df.index = ['a', 'b', 'c', 'd', 'e', 'f']
>>> df['nth'] = get_appear_order(df['v'], ascending=False)
>>> df
v nth
a A 3
b B 1
c A 2
d A 1
e C 2
f C 1
'''
df = | pd.DataFrame({'v': series}) | pandas.DataFrame |
# coding=utf-8
"""
Module to apply a previously trained model to estimate the epigenome
for a specific cell type in a different species
"""
import os as os
import pandas as pd
import numpy as np
import numpy.random as rng
import operator as op
import multiprocessing as mp
import json as json
import pickle as pck
from scipy.interpolate import LSQUnivariateSpline as kspline
from crplib.auxiliary.seq_parsers import get_twobit_seq
from crplib.auxiliary.hdf_ops import load_masked_sigtrack, get_valid_hdf5_groups, get_mapindex_groups
from crplib.auxiliary.file_ops import create_filepath
from crplib.auxiliary.modeling import select_dataset_subset, load_model, \
load_model_metadata, get_scorer, load_ml_dataset, determine_scoring_method, apply_preprocessor
from crplib.mlfeat.featdef import feat_mapsig, get_online_version
from crplib.auxiliary.constants import CHROMOSOME_BOUNDARY
from crplib.metadata.md_signal import MD_SIGNAL_COLDEFS
from crplib.metadata.md_signal import gen_obj_and_md as gen_sigobj
from crplib.metadata.md_regions import MD_REGION_COLDEFS
from crplib.metadata.md_regions import gen_obj_and_md as genregobj
def load_dataset(fpath, groups, features, subset='', ycol='', ytype=None):
"""
:param fpath:
:param groups:
:param features:
:param subset:
:param ycol:
:param ytype:
:return:
"""
with | pd.HDFStore(fpath, 'r') | pandas.HDFStore |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the previously defined values should
        # remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # index should return the same result as the default index without name
        # thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = | Series(['a;b', 'a', 7]) | pandas.Series |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from config import test_snr_dB
import pandas as pd
from scipy.stats import ttest_1samp
def plot_paper_results(folder_envtfs, folder_stft):
sns.set(style="whitegrid")
df_env = pd.read_csv('models\\' + folder_envtfs + '\\results.csv', sep=';')
df_stft = pd.read_csv('models\\' + folder_stft + '\\results.csv', sep=';')
df_orig = df_env.copy()
df_orig = df_orig.drop(['eSTOI pred.'],axis=1)
df_orig = df_orig.drop(['PESQ pred.'],axis=1)
df_orig = df_orig.rename(columns={'eSTOI orig.':'eSTOI pred.'})
df_orig = df_orig.rename(columns={'PESQ orig.':'PESQ pred.'})
df_orig[' '] = 'Original'
df_env[' '] = 'ENV-TFS'
df_stft[' '] = 'STFT'
df = pd.concat([df_orig, df_stft, df_env])
sns.set(style="ticks",font='STIXGeneral')
fig = plt.figure(figsize=(11, 4.5))
size=16
plt.subplot(121)
ax = sns.boxplot(x='SNR', y='eSTOI pred.', hue=' ', data=df, fliersize=1)
plt.xlabel('SNR (dB)', {'size': size})
plt.ylabel('eSTOI', {'size': size})
ax.legend_.remove()
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.8)
ax.tick_params(labelsize=size)
lines, labels = ax.get_legend_handles_labels()
# fig.legend(lines, labels, loc='upper center')
fig.legend(lines, labels, loc='upper center', bbox_to_anchor=(0.53, 0.10), shadow = False, ncol = 3, prop={'size': size-3})
plt.tight_layout()
# plt.savefig('fig4.1_estoi_total.pdf',dpi=2000)
# plt.show()
# plt.figure(figsize=(11, 4.5))
plt.subplot(122)
ax = sns.boxplot(x='SNR', y='PESQ pred.', hue=' ', data=df, fliersize=1)
ax.legend_.remove()
ax.tick_params(labelsize=size)
# ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.11), ncol = 3)
plt.xlabel('SNR (dB)',{'size': size})
plt.ylabel('PESQ', {'size': size})
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.8)
plt.tight_layout()
plt.savefig('fig4_estoi_pesq_total.pdf',dpi=2000)
plt.show()
# multi plot
sns.set(style="ticks",font='STIXGeneral',font_scale=1.3)
g = sns.relplot(x="SNR", y="eSTOI pred.", hue = " ", col = "Noise", data = df, kind = "line",
col_wrap=5, height=2.5, aspect=0.8, legend='full')
# plt.tight_layout()
g.fig.subplots_adjust(wspace=0.10)
g.set_ylabels('eSTOI')
g.set_xlabels('SNR (dB)')
g.set(xticks=[-6, 0, 6])
g.set(xlim=(min(test_snr_dB), max(test_snr_dB)))
g.set(ylim=(0, 1))
g.set_titles("{col_name}",)
# for a in g.axes:
# a.axhline(a.get_yticks()[1], alpha=0.5, color='grey')
leg = g._legend
leg.set_bbox_to_anchor([0.84, 0.86]) # coordinates of lower left of bounding box
leg._loc = 1
plt.savefig('fig5_estoi_per_noise.pdf',bbox_inches='tight',dpi=2000)
plt.show()
# eSTOI increase histogram
plt.figure()
ax = sns.distplot(df_env['eSTOI pred.'] - df_env['eSTOI orig.'], kde_kws={"shade": True}, norm_hist=True, label='ENV-TFS')
sns.distplot(df_stft['eSTOI pred.'] - df_stft['eSTOI orig.'], kde_kws={"shade": True}, norm_hist=True, label='STFT')
plt.legend()
vals = ax.get_xticks()
ax.set_xticklabels(['{:,.0%}'.format(x) for x in vals])
plt.xlabel('eSTOI increase')
plt.ylabel('density')
plt.tight_layout()
plt.show()
# PESQ increase per snr histogram
# ax = sns.kdeplot(df_env['SNR'], df_env['PESQ pred.'] - df_env['PESQ orig.'], cmap="Reds", shade=True,shade_lowest=False, label='ENV')
# sns.kdeplot(df_stft['SNR'], df_stft['PESQ pred.'] - df_stft['PESQ orig.'], cmap="Blues", shade=True,shade_lowest=False, label='STFT')
ax = sns.distplot(df_env['PESQ pred.'] - df_env['PESQ orig.'], kde_kws={"shade": True}, norm_hist=True,
label='ENV-TFS')
sns.distplot(df_stft['PESQ pred.'] - df_stft['PESQ orig.'], kde_kws={"shade": True}, norm_hist=True, label='STFT')
plt.legend()
vals = ax.get_xticks()
plt.xlabel('PESQ increase')
plt.ylabel('density')
plt.tight_layout()
plt.show()
return
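# Hedged usage sketch (not part of the original script): the folder names are
# hypothetical placeholders for model output directories, each containing the
# semicolon-separated results.csv with the columns used above ('SNR', 'Noise',
# 'eSTOI orig.', 'eSTOI pred.', 'PESQ orig.', 'PESQ pred.').
def _example_plot_paper_results():
    # Wrapped in a function so importing this module does not trigger plotting.
    plot_paper_results(folder_envtfs='envtfs_run1', folder_stft='stft_run1')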
def plot_matlab_results(folder_envtfs, folder_stft):
df_env1 = pd.read_excel('models\\' + folder_envtfs + '\\HA_1.xls')
df_env2 = pd.read_excel('models\\' + folder_envtfs + '\\HA_2.xls')
df_env3 = pd.read_excel('models\\' + folder_envtfs + '\\HA_3.xls')
df_env4 = pd.read_excel('models\\' + folder_envtfs + '\\HA_4.xls')
df_env5 = pd.read_excel('models\\' + folder_envtfs + '\\HA_5.xls')
df_env6 = pd.read_excel('models\\' + folder_envtfs + '\\HA_6.xls')
df_stft1 = pd.read_excel('models\\' + folder_stft + '\\HA_1.xls')
df_stft2 = pd.read_excel('models\\' + folder_stft + '\\HA_2.xls')
df_stft3 = pd.read_excel('models\\' + folder_stft + '\\HA_3.xls')
df_stft4 = pd.read_excel('models\\' + folder_stft + '\\HA_4.xls')
df_stft5 = pd.read_excel('models\\' + folder_stft + '\\HA_5.xls')
df_stft6 = pd.read_excel('models\\' + folder_stft + '\\HA_6.xls')
df_env1['Profile'] = 'HL1'
df_env2['Profile'] = 'HL2'
df_env3['Profile'] = 'HL3'
df_env4['Profile'] = 'HL4'
df_env5['Profile'] = 'HL5'
df_env6['Profile'] = 'HL6'
df_stft1['Profile'] = 'HL1'
df_stft2['Profile'] = 'HL2'
df_stft3['Profile'] = 'HL3'
df_stft4['Profile'] = 'HL4'
df_stft5['Profile'] = 'HL5'
df_stft6['Profile'] = 'HL6'
df_env = pd.concat([df_env1, df_env2, df_env3, df_env4, df_env5, df_env6])
import numpy
import pandas
import operator
import ema_workbench.analysis.prim
from ..scope.box import Box, Bounds, Boxes
from ..scope.scope import Scope
from .discovery import ScenarioDiscoveryMixin
from plotly import graph_objects as go
class Prim(ema_workbench.analysis.prim.Prim, ScenarioDiscoveryMixin):
def find_box(self):
result = super().find_box()
result.__class__ = PrimBox
result._explorer = getattr(self, '_explorer', None)
return result
def tradeoff_selector(self, n=-1, colorscale='viridis'):
'''
Visualize the trade off between coverage and density, for
a particular PrimBox.
Parameters
----------
n : int, optional
The index number of the PrimBox to use. If not given,
the last found box is used. If no boxes have been found
yet, giving any value other than -1 will raise an error.
colorscale : str, default 'viridis'
A valid color scale name, as compatible with the
color_palette method in seaborn.
Returns
-------
FigureWidget
'''
try:
box = self._boxes[n]
except IndexError:
if n == -1:
box = self.find_box()
else:
raise
return box.tradeoff_selector(colorscale=colorscale)
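# Hedged usage sketch (illustrative only): 'experiments' is a DataFrame of
# model inputs and 'of_interest' a boolean array marking the cases of
# interest, as expected by the underlying ema_workbench Prim; the threshold
# value and variable names here are assumptions, not part of the original.
def _example_prim_workflow(experiments, of_interest):
    prim_alg = Prim(experiments, of_interest, threshold=0.8)
    box = prim_alg.find_box()            # PrimBox subclass defined below
    fig = prim_alg.tradeoff_selector()   # coverage/density trade-off figure
    return box.to_emat_box(), fig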
def _discrete_color_scale(name='viridis', n=8):
import seaborn as sns
colors = sns.color_palette(name, n)
colorlist = []
for i in range(n):
c = colors[i]
thiscolor_s = f"rgb({int(c[0]*255)}, {int(c[1]*255)}, {int(c[2]*255)})"
colorlist.append([i/n, thiscolor_s])
colorlist.append([(i+1)/n, thiscolor_s])
return colorlist
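# Illustrative only: _discrete_color_scale returns a plotly-style colorscale,
# i.e. a list of [position, 'rgb(r, g, b)'] pairs with each colour listed
# twice so the scale renders as hard steps rather than a gradient.
def _example_discrete_color_scale():
    scale = _discrete_color_scale('viridis', n=4)   # 4 discrete bands
    return go.Figure(go.Heatmap(z=[[0, 1], [2, 3]], colorscale=scale))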
class PrimBox(ema_workbench.analysis.prim.PrimBox):
def to_emat_box(self, i=None, name=None):
if i is None:
i = self._cur_box
if name is None:
name = f'prim box {i}'
limits = self.box_lims[i]
b = Box(name)
for col in limits.columns:
if isinstance(self.prim.x.dtypes[col], pandas.CategoricalDtype):
if set(self.prim.x[col].cat.categories) != limits[col].iloc[0]:
b.replace_allowed_set(col, limits[col].iloc[0])
else:
if limits[col].iloc[0] != self.prim.x[col].min():
b.set_lower_bound(col, limits[col].iloc[0])
if limits[col].iloc[1] != self.prim.x[col].max():
b.set_upper_bound(col, limits[col].iloc[1])
b.coverage = self.peeling_trajectory['coverage'][i]
b.density = self.peeling_trajectory['density'][i]
b.mass = self.peeling_trajectory['mass'][i]
return b
def __repr__(self):
i = self._cur_box
head = f"<{self.__class__.__name__} peel {i+1} of {len(self.peeling_trajectory)}>"
# make the box definition
qp_values = self.qp[i]
uncs = [(key, value) for key, value in qp_values.items()]
uncs.sort(key=operator.itemgetter(1))
uncs = [u[0] for u in uncs]
box_lim = pandas.DataFrame(index=uncs, columns=['min', 'max'])
import matplotlib.pyplot as plt, numpy as np, pandas as pd
from matplotlib.ticker import FuncFormatter # used in formatting log scales
import mpl_toolkits.basemap.pyproj as pyproj
import hydro
#%matplotlib inline
data = pd.read_csv("stream.csv")
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
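# Hedged sketch of the parametrization suggested above (illustrative only; the
# leading underscore keeps pytest from collecting it as a real test). Each
# operator/frame pair would become its own parametrized case instead of living
# inside one large loop-based test method.
@pytest.mark.parametrize("op", [operator.add, operator.sub, operator.mul])
def _example_parametrized_frame_op(op):
    df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    result = op(df, 2)
    expected = pd.DataFrame({col: op(df[col], 2) for col in df.columns})
    tm.assert_frame_equal(result, expected)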
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = | DataFrame({"A": [np.nan, 3, np.nan]}, index=base) | pandas.DataFrame |
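# Hedged aside (not part of the test file above): a minimal sketch of the
# label alignment that the frame-arithmetic tests above exercise. Binary ops
# between DataFrames align on both index and columns, leaving NaN where a
# label is missing unless a fill_value is supplied.
import pandas as pd

left = pd.DataFrame({"A": [1.0, 2.0]}, index=["x", "y"])
right = pd.DataFrame({"A": [10.0], "B": [5.0]}, index=["y"])
print(left + right)                     # union of labels, NaN elsewhere
print(left.add(right, fill_value=0.0))  # treat missing labels as 0 where possible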
# Imports for python2 implementation.
from __future__ import print_function, unicode_literals
from __future__ import absolute_import, division
import sys, os
import numpy as np
import pandas as pd
import MDSplus as mds
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import ticker, cm, colors
from scipy.interpolate import Rbf, interp1d
from gadata import gadata
class ThomsonClass:
"""
Examples of how to use ThomsonClass object.
Example #1:
ts = ThomsonClass(176343, 'divertor')
ts.load_ts()
ts.map_to_efit(times=np.linspace(2500, 5000, 10), ref_time=2500)
ts.heatmap(detach_front_te=5, offset=0.1)
Example #2:
ts = ThomsonClass(176343, 'core')
ts.load_ts()
ts.map_to_efit(np.linspace(2500, 5000, 10))
ts.avg_omp # Dictionary of average omp values.
"""
def __init__(self, shot, system):
self.shot = shot
self.system = system
self.conn = None
self.ts_dict = {}
def __repr__(self):
return "ThomsonClass Object\n" \
+ " Shot: " + str(self.shot) + "\n" \
+ " System: " + str(self.system)
def load_ts(self, verbal=True, times=None, tunnel=True, filter=False,
fs='FS04', avg_thresh=2, method='simple', window_len=11):
"""
Function to get all the data from the Thomson Scattering "BLESSED" tree
on atlas. The data most people probably care about is the temperature (eV)
and density (m-3) data in ts_dict. In the 'temp' and 'density' entries,
'X' is the time, and 'Y' is a 2D array where each row is the data for a
single chord at each of those times. Mapping to psin coordinates and such
are done in a different function.
tunnel: If using locally, set to True. This mean you need an ssh tunnel connected
to local host. The command:
ssh -Y -p 2039 -L 8000:atlas.gat.com:8000 <EMAIL>
should work in a separate terminal. Set to False if on DIII-D network.
times: Provide times that a polynomial fit will be applied to, and then
averaged over. This is my attempt at smoothing the TS data when
it's noisy (maybe to help with ELMs).
filter: Run the filter function to do a simple filter of the data.
"""
# Create thin connection to MDSplus on atlas. Tunnel if connection locally.
if tunnel:
conn = mds.Connection("localhost")
else:
conn = mds.Connection("atlas.gat.com")
# Store the connection object in the class for later use.
self.conn = conn
# Open the tree of the shot we want.
tree = conn.openTree("d3d", self.shot)
# Will need to probably update this as I go, since I'm not sure what
# the earliest BLESSED shot was (or which is REVISION01 for that matter).
if self.shot <= 94741:
base = "\\D3D::TOP.ELECTRONS.TS.REVISIONS.REVISION00"
else:
base = "\\D3D::TOP.ELECTRONS.TS.BLESSED"
# Specify which system we want.
print("Thomson system: " + self.system)
        if self.system == "core":
            base = base + ".CORE"
        elif self.system == "divertor":
            base = base + ".DIVERTOR"
        elif self.system == "tangential":
            base = base + ".TANGENTIAL"
# List containing all the names of the nodes under BLESSED.
nodes = ["CALCMASK", "CDPOLYBOX", "CDPULSE", "CHANNEL", "CHI_MAX",
"CHI_MIN", "DCDATA", "DENSITY", "DENSITY_E", "DETOPT",
"DCPEDESTAL", "DETOPT", "FITDATA", "FITTHRESHOLD", "FRACCHI",
"INIT_NE", "INIT_TE", "ITMICRO", "LFORDER", "LPROF", "MAXFITS",
"PHI", "PLDATA", "PLERROR", "PLPEDESTAL", "PLPEDVAR", "R",
"REDCHISQ", "SUPOPT", "TEMP", "TEMP_E", "THETA", "TIME", "Z"]
if not verbal:
print("Loading Thomson data...")
# Get the data for each node, and put it in a dictionary of X and Y values.
# For 1D data, like "Z", the X will be the channel number, and the Y will
# be the Z coordinate.
for node in nodes:
try:
# Make the path to the node.
path = base + "." + node
if verbal:
print("Getting data from node: " + path)
# Get the data(Y) and dimension data(X) from the node.
data = conn.get(path).data()
data_dim = conn.get("DIM_OF(" + path + ")").data()
# Put into a dictionary then put it into the master dictionary.
data_dict = {"X":data_dim, "Y":data}
self.ts_dict[node.lower()] = data_dict
# If the node is TEMP or DENSITY, there are nodes beneath it. I haven't
# seen data in these nodes ever (except R and Z, but they're redundant
# and the same as the R and Z nodes as above), but check anyways.
if node in ["TEMP", "DENSITY"]:
for subnode in ["PHI", "PSI01", "PSI02", "R", "RHO01", "RHO02", "Z"]:
path = base + "." + node + "." + subnode
if verbal:
print("Getting data from node: " + path)
try:
data = conn.get(path).data()
data_dim = conn.get("dim_of(" + path + ")").data()
data_dict = {"Time":data_dim, subnode.lower():data}
self.ts_dict[node.lower() + "." + subnode.lower()] = data_dict
except (mds.MdsIpException, mds.TreeNODATA):
if verbal:
print(" Node has no data.")
# This error is returned if the node is empty. Catch it.
#except (mds.MdsIpException, mds.TreeNODATA, mds.MdsException):
#except (mds.MdsException):
# if verbal:
# print(" Node has no data.")
# For compatablity with some of the older shots.
#except mds.TreeNNF:
# if verbal:
# print(" Node not found.")
except:
if verbal:
print(" Node not found/no data.")
# Pull these into DataFrames, a logical and easy way to represent the
# data. Initially the rows are each a TS chord, and the columns are at
# each time.
self.temp_df = pd.DataFrame(columns=self.ts_dict['temp']['X'], data=self.ts_dict['temp']['Y'])
self.dens_df = pd.DataFrame(columns=self.ts_dict['density']['X'], data=self.ts_dict['density']['Y'])
self.temp_df.index.name = 'Chord'
self.temp_df.columns.name = 'Time (ms)'
self.dens_df.index.name = 'Chord'
self.dens_df.columns.name = 'Time (ms)'
# Transpose the data so each row is at a specific time, and the columns
# are the chords.
self.temp_df = self.temp_df.transpose()
self.dens_df = self.dens_df.transpose()
# Filter the data from ELMs and just replace with the filtered dataframes.
if filter:
print("Filtering data...")
self.temp_df_unfiltered = self.temp_df
self.dens_df_unfiltered = self.dens_df
self.filter_elms(fs=fs, avg_thresh=avg_thresh, method=method, window_len=window_len)
self.temp_df = self.temp_df_filt
self.dens_df = self.dens_df_filt
# Do a polynomial fit to the data. Fifth-order.
if times is not None:
self.temp_df_poly = pd.DataFrame()
self.dens_df_poly = pd.DataFrame()
xp = np.linspace(times.min(), times.max(), 1000)
for chord in self.temp_df:
# Limit time between desired times.
x = self.temp_df.index.values
idxs = np.where(np.logical_and(x >= times.min(), x <= times.max()))[0]
x = x[idxs]
y_te = self.temp_df[chord].values[idxs]
y_ne = self.dens_df[chord].values[idxs]
z_te = np.polyfit(x, y_te, 5) # Returns five exponents.
z_ne = np.polyfit(x, y_ne, 5)
p_te = np.poly1d(z_te) # Creates the fit with the exponents.
p_ne = np.poly1d(z_ne)
yp_te = p_te(xp) # Use the fit to create 1,000 points.
yp_ne = p_ne(xp)
# Put into the Dataframe.
self.temp_df_poly[chord] = yp_te
self.dens_df_poly[chord] = yp_ne
# Give the index the times.
self.temp_df_poly.index = xp
self.dens_df_poly.index = xp
# Can we just swap temp_df out with temp_df_poly?
#self.temp_df = self.temp_df_poly
#self.dens_df = self.dens_df_poly
def load_gfile_mds(self, shot, time, tree="EFIT04", exact=False,
connection=None, tunnel=True, verbal=True):
"""
This is scavenged from the load_gfile_d3d script on the EFIT repository,
except updated to run on python3.
shot: Shot to get gfile for.
time: Time of the shot to load gfile for, in ms.
tree: One of the EFIT trees to get the data from.
exact: If True will raise error if time does not exactly match any gfile
times. False will grab the closest time.
connection: An MDSplus connection to atlas.
tunnel: Set to True if accessing outside DIII-D network.
returns: The requested gfile as a dictionary.
"""
# Connect to server, open tree and go to g-file
if connection is None:
if tunnel is True:
connection = mds.Connection("localhost")
else:
connection = mds.Connection('atlas.gat.com')
connection.openTree(tree, shot)
base = 'RESULTS:GEQDSK:'
# get time slice
if verbal:
print("Loading gfile:")
print(" Shot: " + str(shot))
print(" Tree: " + tree)
print(" Time: " + str(time))
signal = 'GTIME'
k = np.argmin(np.abs(connection.get(base + signal).data() - time))
time0 = int(connection.get(base + signal).data()[k])
if (time != time0):
if exact:
raise RuntimeError(tree + ' does not exactly contain time %.2f'\
%time + ' -> Abort')
else:
if verbal:
print('Warning: Closest time is ' + str(time0) +'.')
#print('Fetching time slice ' + str(time0))
time = time0
# store data in dictionary
g = {'shot': shot, 'time': time}
# get header line
try:
header = connection.get(base + 'ECASE').data()[k]
except:
print(" No header line.")
# get all signals, use same names as in read_g_file
translate = {'MW': 'NR', 'MH': 'NZ', 'XDIM': 'Xdim', 'ZDIM': 'Zdim', 'RZERO': 'R0',
'RMAXIS': 'RmAxis', 'ZMAXIS': 'ZmAxis', 'SSIMAG': 'psiAxis', 'SSIBRY': 'psiSep',
'BCENTR': 'Bt0', 'CPASMA': 'Ip', 'FPOL': 'Fpol', 'PRES': 'Pres',
'FFPRIM': 'FFprime', 'PPRIME': 'Pprime', 'PSIRZ': 'psiRZ', 'QPSI': 'qpsi',
'NBBBS': 'Nlcfs', 'LIMITR': 'Nwall'}
for signal in translate:
try:
g[translate[signal]] = connection.get(base + signal).data()[k]
except:
print(" Node not found: " + base + signal)
g['R1'] = connection.get(base + 'RGRID').data()[0]
g['Zmid'] = 0.0
RLIM = connection.get(base + 'LIM').data()[:, 0]
ZLIM = connection.get(base + 'LIM').data()[:, 1]
g['wall'] = np.vstack((RLIM, ZLIM)).T
RBBBS = connection.get(base + 'RBBBS').data()[k][:int(g['Nlcfs'])]
ZBBBS = connection.get(base + 'ZBBBS').data()[k][:int(g['Nlcfs'])]
g['lcfs'] = np.vstack((RBBBS, ZBBBS)).T
KVTOR = 0
RVTOR = 1.7
NMASS = 0
RHOVN = connection.get(base + 'RHOVN').data()[k]
# convert floats to integers
for item in ['NR', 'NZ', 'Nlcfs', 'Nwall']:
g[item] = int(g[item])
# convert single (float32) to double (float64) and round
for item in ['Xdim', 'Zdim', 'R0', 'R1', 'RmAxis', 'ZmAxis', 'psiAxis',
'psiSep', 'Bt0', 'Ip']:
g[item] = np.round(np.float64(g[item]), 7)
# convert single arrays (float32) to double arrays (float64)
for item in ['Fpol', 'Pres', 'FFprime', 'Pprime', 'psiRZ', 'qpsi',
'lcfs', 'wall']:
g[item] = np.array(g[item], dtype=np.float64)
# Construct (R,Z) grid for psiRZ
g['dR'] = g['Xdim']/(g['NR'] - 1)
g['R'] = g['R1'] + np.arange(g['NR'])*g['dR']
g['dZ'] = g['Zdim']/(g['NZ'] - 1)
NZ2 = int(np.floor(0.5*g['NZ']))
g['Z'] = g['Zmid'] + np.arange(-NZ2, NZ2+1)*g['dZ']
# normalize psiRZ
g['psiRZn'] = (g['psiRZ'] - g['psiAxis']) / (g['psiSep'] - g['psiAxis'])
return g
def map_to_efit(self, times=None, ref_time=None, average_ts=5, debug=False,
tree='EFIT04', choose_interp_region=False, trunc_div=False):
"""
This function uses the gfiles of each time in times to find the location
of each TS chord relative to the X-point in polar coordinates, (d, theta).
By doing this for a swept strike point and mapping each (d, theta) back
to a reference frame, 2D profiles of Te and ne can be obtained.
BUG: Mapping the core to the X-point during a sweep doesn't work. The
X-point can move while the core plasma doesn't, thus a moving X-point
could make it seem like the core sweeps a range too when it doesn't.
A fix here could be mapping the core to the plasma center, while the
divertor TS stays with the X-point.
Note: Currently only written for LSN. Needs to be updated for USN.
times: A list of times to average over and map to. Can be a single float
or list. The gfile for each time will be loaded.
ref_time: The time for the reference frame, which the 2D plots will be
plotted over. For a left to right sweep, choose the time
where the strike point is furthest to the left.
average_ts: A parameter to help smooth the data a bit. Instead of just
taking the TS data at the time that matches the current
                    gfile, it will take the previous 5 and next 5 TS times and
average them.
"""
# Just use the first time as the ref_time (this may be run without
# needing it so catch it).
if ref_time is None:
ref_time = times[0]
times = np.array(times)
# Store times for later use in plotting function.
self.times = np.array(times)
self.ref_time = ref_time
self.temp_df_omp = pd.DataFrame()
self.dens_df_omp = pd.DataFrame()
# Load gfile(s).
self.ref_df = pd.DataFrame()
self.ref_df.index.name = 'Chord'
self.ref_df.columns.name = 'Time (R, Z)'
count = 1
# Current implementation makes the first time the reference frame. So make
# times have the ref_time in front.
ref_idx = np.where(times == ref_time)[0][0]
times = np.append(ref_time, np.append(times[:ref_idx], times[ref_idx+1:]))
ref_flag = True
# Defaults for fitting the LCFS interpolation functions.
start = 13; end = 71
for time in times:
print('\nLoading gfile (' + str(count) + '/' + str(len(times)) + ')...')
try:
gfile = self.load_gfile_mds(shot=self.shot, time=time, connection=self.conn, verbal=True, tree=tree)
# Store for plotting function.
if ref_flag:
self.ref_gfile = gfile
# Z's and R's of the separatrix, in m.
Zes = np.copy(gfile['lcfs'][:, 1])
Res = np.copy(gfile['lcfs'][:, 0])
# The location of the X-point is here where the lowest Z is (LSN).
xpoint_idx = np.where(Zes == Zes.min())[0][0]
Rx = Res[xpoint_idx]
Zx = Zes[xpoint_idx]
if debug:
print('X-point (R, Z): ({:.2f}, {:.2f})'.format(Rx, Zx))
# Use polar coordinates with the X-point as the origin.
# The distance from the X-point is just the distance formula.
rs = self.ts_dict['r']['Y']; zs = self.ts_dict['z']['Y']
# If this is the first time(the reference frame), save these values.
# These ref rs, zs are already (R, Z) in the reference frame (obviously),
# so we don't need to convert to polar and then back to (R, Z) in the
# reference frame.
if ref_flag:
Rx_ref = Rx; Zx_ref = Zx
rs_ref = rs; zs_ref = zs
self.ref_df[str(time)] = list(zip(rs_ref, zs_ref))
ref_flag = False
else:
# The angle is computed using arctan2 to get the correct quadrant (radians).
theta = np.arctan2(zs - Zx, rs - Rx)
#print("Theta: ", end=''); print(*theta, sep=', ')
d = np.sqrt((rs - Rx)**2 + (zs - Zx)**2)
#print("Distance from X-point: ", end=''); print(*d, sep=', ')
# Now convert back to (R, Z), but in the reference frame.
rs_into_ref = d * np.cos(theta) + Rx_ref
zs_into_ref = d * np.sin(theta) + Zx_ref
self.ref_df[str(time)] = list(zip(rs_into_ref, zs_into_ref))
# Find the Te and ne data to put in for these times as well. Average
# the neighboring +/-"average_ts" points. Ex. If average_ts = 5, and
# the time is 2000, it will take the last 5 and next 5 TS points and
# average the resulting 11 profiles together into one for 2000 ms.
# First find closest time in the index.
idx = np.abs(self.temp_df.index.values - time).argmin()
# Average the desired range of values.
#self.avg_temp_df = self.temp_df.iloc[idx - average_ts : idx + average_ts].mean()
#self.avg_dens_df = self.dens_df.iloc[idx - average_ts : idx + average_ts].mean()
self.avg_temp_df = self.temp_df.iloc[idx]
self.avg_dens_df = self.dens_df.iloc[idx]
# Append this to our ref_df.
self.ref_df['Te at ' + str(time)] = self.avg_temp_df
self.ref_df['Ne at ' + str(time)] = self.avg_dens_df
# Let's also map each chord to R-Rsep omp and store in a DataFrame.
# Get the additional information needed for this.
Rs, Zs = np.meshgrid(gfile['R'], gfile['Z'])
Z_axis = gfile['ZmAxis']
R_axis = gfile['RmAxis']
if trunc_div:
Rs_trunc = Rs > self.ts_dict['r']['Y'][0] * 0.95
else:
Rs_trunc = Rs > R_axis
# Only want the outboard half since thats where we're mapping R-Rsep OMP to.
if choose_interp_region:
lcfs_rs = gfile['lcfs'][:, 0]
lcfs_zs = gfile['lcfs'][:, 1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(lcfs_rs, lcfs_zs, 'k.')
for i in range(0, len(lcfs_rs)):
ax.annotate(i, (lcfs_rs[i], lcfs_zs[i]))
ax.plot(rs, zs, 'r.')
fig.show()
start = int(input('Enter start index for interpolation region: '))
end = int(input('Enter end index for interpolation region: '))
choose_interp_region = False
Zes_outboard = np.copy(gfile['lcfs'][:, 1][start:end])
Res_outboard = np.copy(gfile['lcfs'][:, 0][start:end])
#else:
# Zes_outboard = np.copy(gfile['lcfs'][:, 1][13:-17])
# Res_outboard = np.copy(gfile['lcfs'][:, 0][13:-17])
try:
# Interpolation functions of psin(R, Z) and R(psin, Z). Rs_trunc
# helps with not interpolating the entire plasma, and just that
# to the right of the magnetic axis, which is normally good enough.
f_psiN = Rbf(Rs[Rs_trunc], Zs[Rs_trunc], gfile['psiRZn'][Rs_trunc])
f_Romp = Rbf(gfile['psiRZn'][Rs_trunc], Zs[Rs_trunc], Rs[Rs_trunc], epsilon=0.00001)
f_Rs = interp1d(Zes_outboard, Res_outboard, assume_sorted=False)
# The process is to get the (R, Z) of each chord...
chord_rs = self.ts_dict['r']['Y']
chord_zs = self.ts_dict['z']['Y']
# ...then find the corresponding psin of each...
chord_psins = f_psiN(chord_rs, chord_zs)
# ...then use this psin to find the value at the omp (Z_axis)...
chord_omps = f_Romp(chord_psins, np.full(len(chord_psins), Z_axis))
# ... get the value of the separatrix at the omp the calculate
# R-Rsep omp...
rsep_omp = f_Rs(Z_axis)
chord_rminrsep_omps = chord_omps - rsep_omp
if debug:
print("Rsep OMP: {:.3f}".format(rsep_omp))
print("Chord OMPs: ", end=""); print(chord_omps)
# ...and then wrap each omp value with the corresponding temperature,
# and put each point into a dataframe.
te_idx = np.where(np.abs(self.temp_df.index.values-time) ==
np.abs(self.temp_df.index.values-time).min())[0][0]
chord_tes = self.temp_df.iloc[te_idx]
chord_nes = self.dens_df.iloc[te_idx]
self.temp_df_omp[time] = list(zip(chord_rminrsep_omps, chord_tes))
self.dens_df_omp[time] = list(zip(chord_rminrsep_omps, chord_nes))
# Put the psin that the chord is on too.
self.temp_df_omp[str(time) + ' psin'] = list(zip(chord_psins, chord_tes))
self.dens_df_omp[str(time) + ' psin'] = list(zip(chord_psins, chord_nes))
except Exception as e:
print("Error in the OMP steps: \n " + str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
except Exception as e:
print("Error loading gfile: \n " + str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
count += 1
# End "for time in times" loop.
# Create average Te and ne values mapped to the OMP.
self.avg_omp = {}
try:
avg_psins = np.array([])
avg_omps = np.array([])
avg_tes = np.array([])
avg_nes = np.array([])
avg_omps_err = np.array([])
avg_tes_err = np.array([])
avg_nes_err = np.array([])
for chord in range(0, len(self.temp_df_omp.index)):
tmp_psins = np.array([])
tmp_omps = np.array([])
tmp_tes = np.array([])
tmp_nes = np.array([])
#for time in range(0, len(times)):
for time in times:
# Get the tuple data point for this chord at this time (r-rsep_omp, Te).
tmp_p = self.temp_df_omp[str(time) + ' psin'].values[chord][0]
tmp_o = self.temp_df_omp[time].values[chord][0]
tmp_t = self.temp_df_omp[time].values[chord][1]
tmp_n = self.dens_df_omp[time].values[chord][1]
tmp_psins = np.append(tmp_psins, tmp_p)
tmp_omps = np.append(tmp_omps, tmp_o)
tmp_tes = np.append(tmp_tes, tmp_t)
tmp_nes = np.append(tmp_nes, tmp_n)
# Get the average for this chord. Append it to avg_omps/avg_tes.
avg_psins = np.append(avg_psins, tmp_psins.mean())
avg_omps = np.append(avg_omps, tmp_omps.mean())
avg_tes = np.append(avg_tes, tmp_tes.mean())
avg_nes = np.append(avg_nes, tmp_nes.mean())
avg_omps_err = np.append(avg_omps_err, tmp_omps.std())
avg_tes_err = np.append(avg_tes_err, tmp_tes.std())
avg_nes_err = np.append(avg_nes_err, tmp_nes.std())
# Store in dictionary in class.
self.avg_omp['Psin'] = avg_psins
self.avg_omp['RminRsep_omp'] = avg_omps
self.avg_omp['Te_omp'] = avg_tes
self.avg_omp['ne_omp'] = avg_nes
self.avg_omp['RminRsep_omp_err'] = avg_omps_err
self.avg_omp['Te_omp_err'] = avg_tes_err
self.avg_omp['ne_omp_err'] = avg_nes_err
except Exception as e:
print("Error in calculating the average OMP values: \n " + str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def heatmap(self, te_clip=50, ne_clip=2e25, rlim_min=1.3, rlim_max=1.6,
zlim_min=-1.3, zlim_max=-1.0, detach_front_te=5, offset=0.01,
sp_hit_z=-1.25):
"""
Function to produce the 2D maps of Te and ne on the poloidal cross
section of ref_time. load_ts and map_to_efit must be run first to store
the needed data into the class!
te_clip: Clips any Te data above this value. Use if you have problems
                 with high values messing up the contour plot.
ne_clip: Same, but for ne.
rlim_min: Limits on the plotting windows.
rlim_max: See above.
zlim_min: See above.
zlim_max: See above.
        detach_front_te: Te value (eV) used to define the detachment front
                         along the outer leg. Set to None/False to skip the
                         detachment front calculation.
        offset: Radial offset (m) added to the leg radius when searching for
                the detachment front a second time (gives a second estimate).
sp_hit_z: The shelf is at Z = -1.25, so the SP hits at this location. Can
change to the floor location if that comes around as needed.
"""
# First make sure ref_time is in self.times.
if self.ref_time in self.times:
# Create empty arrays to hold Rs, Zs, Tes and Nes.
rs = np.zeros((len(self.ref_df.index), len(self.times)))
zs = np.zeros((len(self.ref_df.index), len(self.times)))
tes = np.zeros((len(self.ref_df.index), len(self.times)))
nes = np.zeros((len(self.ref_df.index), len(self.times)))
idx = 0
for time in self.times:
rs[:, idx] = [p[0] for p in self.ref_df[str(time)].values]
zs[:, idx] = [p[1] for p in self.ref_df[str(time)].values]
tes[:, idx] = [p for p in self.ref_df['Te at ' + str(time)].values]
nes[:, idx] = [p for p in self.ref_df['Ne at ' + str(time)].values]
idx += 1
# Perform clipping so it isn't skewed toward higher values.
tes = np.clip(tes, 0, te_clip)
nes = np.clip(nes, 0, ne_clip)
# Store rs, zs, tes, nes in class for debugging.
self.rs = rs; self.zs = zs; self.tes = tes; self.nes = nes
# For reference in messing with the plotting limits.
print('Te (min/max): ({:.2f}/{:.2f})'.format(tes.min(), tes.max()))
print('Ne (min/max): ({:.2e}/{:.2e})'.format(nes.min(), nes.max()))
# Function to plot wall with lcfs and strike point.
def plot_shot(fig, ax):
ax.plot(self.ref_gfile['wall'][:, 0], self.ref_gfile['wall'][:, 1], 'k')
ax.plot(self.ref_gfile['lcfs'][:, 0], self.ref_gfile['lcfs'][:, 1], 'k-')
ax.set_xlim([rlim_min, rlim_max])
ax.set_ylim([zlim_min, zlim_max])
ax.set_xlabel('R (m)')
ax.set_ylabel('Z (m)')
xpoint = np.where(self.ref_gfile['lcfs'][:,1] == self.ref_gfile['lcfs'][:,1].min())[0][0]
# Left leg.
sp_r1 = self.ref_gfile['lcfs'][:,0][xpoint-1]
sp_z1 = self.ref_gfile['lcfs'][:,1][xpoint-1]
sp_r0 = self.ref_gfile['lcfs'][:,0][xpoint]
sp_z0 = self.ref_gfile['lcfs'][:,1][xpoint]
m = (sp_z1 - sp_z0) / (sp_r1 - sp_r0)
sp_rs = np.linspace(sp_r0, 1.2, 10)
b = sp_z1 - m * sp_r1
sp_zs = m * sp_rs + b
ax.plot(sp_rs, sp_zs, 'k')
# Right leg.
sp_r1 = self.ref_gfile['lcfs'][:,0][xpoint+1]
sp_z1 = self.ref_gfile['lcfs'][:,1][xpoint+1]
sp_r0 = self.ref_gfile['lcfs'][:,0][xpoint]
sp_z0 = self.ref_gfile['lcfs'][:,1][xpoint]
m = (sp_z1 - sp_z0) / (sp_r1 - sp_r0)
sp_rs = np.linspace(sp_r0, 1.5, 1000)
b = sp_z1 - m * sp_r1
sp_zs = m * sp_rs + b
ax.plot(sp_rs, sp_zs, 'k')
# Store these values for use later in detach_length part.
self.sp_r0 = sp_r0; self.sp_z0 = sp_z0
self.sp_r1 = sp_r1; self.sp_z1 = sp_z1
self.sp_rs = sp_rs; self.sp_zs = sp_zs
# Te plot.
fig = plt.figure(figsize=(13,6))
ax1 = fig.add_subplot(121)
plot_shot(fig, ax1)
cont1 = ax1.contourf(rs, zs, tes,
levels=[0, 2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25],
cmap='inferno', extend='max')
cbar1 = fig.colorbar(cont1)
ax1.set_title('Te (eV)')
cbar1.ax.set_ylabel('Te (eV)', size=24)
ax1.scatter(rs, zs, c=tes, edgecolor='k', cmap='inferno')
# Ne plot.
ax2 = fig.add_subplot(122)
plot_shot(fig, ax2)
cont2 = ax2.contourf(rs, zs, nes, levels=10, cmap='viridis', extend='max')
ax2.set_title('ne (m-3)')
cbar2 = fig.colorbar(cont2)
cbar2.ax.set_ylabel('ne (m-3)', size=24)
# Title it and plot.
fig.suptitle(str(self.shot) + " (raw)")
fig.tight_layout()
fig.show()
# Same as above, but with the interpolated data.
f_te = Rbf(self.rs, self.zs, self.tes, epsilon=1e-9)
f_ne = Rbf(self.rs, self.zs, self.nes)
RS, ZS = np.meshgrid(np.linspace(self.rs.min(), self.rs.max(), 100),
np.linspace(self.zs.min(), self.zs.max(), 100))
"""
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
plot_shot(fig, ax1)
cont1 = ax1.contourf(RS, ZS, f_te(RS, ZS),
levels=[0, 2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25],
cmap='inferno')
cbar1 = fig.colorbar(cont1, extend='max')
ax1.set_title('Te (eV)')
cbar1.ax.set_ylabel('Te (eV)', size=24)
plot_shot(fig, ax2)
cont2 = ax2.contourf(RS, ZS, f_ne(RS, ZS), levels=10, cmap='viridis')
cbar2 = fig.colorbar(cont2)
ax2.set_title('ne (m-3)')
cbar2.ax.set_ylabel('ne (m-3)', size=24)
fig.suptitle(str(self.shot) + " (interpolated)")
fig.tight_layout()
fig.show()
"""
fig = plt.figure()
ax1 = fig.add_subplot(111)
plot_shot(fig, ax1)
cont1 = ax1.contourf(RS, ZS, np.clip(f_te(RS, ZS), 0, te_clip),
levels=[0, 2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25],
cmap='inferno', extend='max')
cbar1 = fig.colorbar(cont1)
cbar1.ax.set_ylabel('Te (eV)', size=24)
ax1.set_xlabel('R (m)', fontsize=24)
ax1.set_ylabel('Z (m)', fontsize=24)
ax1.set_title(str(self.shot) + ' Te (eV)', fontsize=24)
# Routine to find how far down the leg the detachment goes.
# Detachment front defined here as detach_front_te.
if detach_front_te:
TES = f_te(RS, ZS)
def find_detach_front(offset=0):
dist_along_leg = 0
leg_rs = np.array([]); leg_zs = np.array([])
# Here's the part that measures it from the floor up.
# Slope of SP.
m = (self.sp_z1 - self.sp_z0) / (self.sp_r1 - self.sp_r0)
b = self.sp_z1 - m * self.sp_r1
                    # Find the R where this line hits the divertor (this is
# just point-slope formula).
sp_hit_r = 1/m * (sp_hit_z - self.sp_z0) + self.sp_r0
self.sp_hit_r = sp_hit_r
# Create a line between the X-point and this point where the SP hits.
length_rs = np.linspace(sp_hit_r, self.sp_r0, 1000)
length_zs = m * length_rs + b
self.length_rs = length_rs
self.length_zs = length_zs
# Go up this leg one point at a time until Te is > 5 eV.
for i in range(1, len(length_rs)):
tmp_r = length_rs[i] + offset
tmp_z = length_zs[i]
# Find the nearest point in the (R, Z) grid. To do this, first
# find the distance between our current leg point and each
# grid point.
grid_dist = np.sqrt(np.abs(RS-tmp_r)**2 + np.abs(ZS-tmp_z)**2)
# Then find index of the minimum distance.
closest_grid_point = np.where(grid_dist == grid_dist.min())
# Add the length travelled to our running distance from X-point.
dist_along_leg += np.sqrt((length_rs[i] - length_rs[i-1])**2
+ (length_zs[i] - length_zs[i-1])**2)
# If we hit detach_front_te, break and our answer is dist_along_leg.
#print("{:5.2f} {:5.2f} {5.2f}".format(tmp_r, tmp_z, TES[closest_grid_point]))
if TES[closest_grid_point] > detach_front_te:
print("Offset: {:.2f} cm".format(offset*100))
print("Detachment front is {:.2f} cm from the floor.".format(dist_along_leg*100))
self.detach_length = dist_along_leg
break
# Add the leg point to the list for plotting after.
leg_rs = np.append(leg_rs, tmp_r)
leg_zs = np.append(leg_zs, tmp_z)
"""
dist_along_leg = 0
leg_rs = np.array([]); leg_zs = np.array([])
for i in range(1, len(self.sp_rs)):
# This finds the distance starting from the X-point. May
# be better to instead look at it from the floor up instead.
# Get the point along the leg, starting at the first point
# past the X-point (i starts at 1).
tmp_r = self.sp_rs[i] + offset
tmp_z = self.sp_zs[i]
# Find the nearest point in the (R, Z) grid. To do this, first
# find the distance between our current leg point and each
# grid point.
grid_dist = np.sqrt(np.abs(RS-tmp_r)**2 + np.abs(ZS-tmp_z)**2)
# Then find index of the minimum distance.
closest_grid_point = np.where(grid_dist == grid_dist.min())
# Add the length travelled to our running distance from X-point.
dist_along_leg += np.sqrt((self.sp_rs[i] - self.sp_rs[i-1])**2
+ (self.sp_zs[i] - self.sp_zs[i-1])**2)
# If we hit detach_front_te, break and our answer is dist_along_leg.
if TES[closest_grid_point] < detach_front_te:
print("Offset: {:.2f} cm".format(offset*100))
print("Detachment front is {:.2f} cm from the X-point.".format(dist_along_leg*100))
self.detach_length = dist_along_leg
break
# Add the leg point to the list for plotting after.
leg_rs = np.append(leg_rs, tmp_r)
leg_zs = np.append(leg_zs, tmp_z)
"""
return leg_rs, leg_zs
self.leg_rs1, self.leg_zs1 = find_detach_front(offset=0)
self.leg_rs2, self.leg_zs2 = find_detach_front(offset=offset)
ax1.plot(self.leg_rs1, self.leg_zs1, 'r-')
ax1.plot(self.leg_rs2, self.leg_zs2, 'r-')
fig.tight_layout()
fig.show()
else:
print("Error: ref_time not one of the times in map_to_efit.")
def filter_elms(self, method='simple', fs='FS04', avg_thresh=2, plot_it=True,
window_len=11):
"""
NOTE: You probably should just use create_omfit_excel.py instead of
this, since OMFITprofiles is superior to filtering ELMs and such.
Method to filter ELM data. Replace Te, ne data that was taken during an
ELM with either exclude or with a linear fit between the value before the ELM
and the value at the end.
method : One of 'simple' or 'median'.
        fs : Simple method. Which filterscope to get data from.
avg_thresh : Simple method. Anything above the average filterscope
value * avg_thresh will be considered an ELM and data in
that time range will be filtered out of the Thomson data.
plot_it : Plot the filtered data.
window_len : Size of window for filtering/smoothing method. Must be odd.
"""
print("Warning: Are you sure you want to use this? Consider using " + \
"the workflow via create_omfit_excel.py. It is way better at " + \
"getting ELM filtered data. Check there or the GitHub for more info.")
if window_len % 2 == 0:
raise ValueError("window_len must be an odd number.")
if method == 'simple':
# First pull in the filterscope data to detect ELMs.
fs_obj = gadata(fs, shot=self.shot, connection=self.conn)
# Indices of ELMs.
abv_avg = fs_obj.zdata > fs_obj.zdata.mean() * avg_thresh
# Plot it just to show it makes sense.
if plot_it:
fig, ax = plt.subplots()
ax.plot(fs_obj.xdata[~abv_avg], fs_obj.zdata[~abv_avg], 'k')
ax.plot(fs_obj.xdata[abv_avg], fs_obj.zdata[abv_avg], 'r')
ax.set_xlabel('Time (ms)')
ax.set_ylabel(fs)
fig.tight_layout()
fig.show()
# Create list of pairs of times, where anything between each pair is
# data to be filtered.
bad_times = []
in_bad = False
for i in range(0, len(abv_avg)):
if not in_bad:
if abv_avg[i] == True:
bad_start = fs_obj.xdata[i]
in_bad = True
else:
if abv_avg[i] == False:
bad_end = fs_obj.xdata[i-1]
in_bad = False
bad_range = (bad_start, bad_end)
bad_times.append(bad_range)
# Get the indices (or just True/False array) of rows in the
# temp_df/dens_df to filter out.
filter_times = np.full(len(self.temp_df.index), False)
filter_times_ref = []
for bt in bad_times:
# For the temp_df/dens_df.
elm_times = np.logical_and(self.temp_df.index > bt[0], self.temp_df.index < bt[1])
filter_times = np.logical_or(filter_times, elm_times)
# Index only the data that didn't fall in an ELM time range.
self.temp_df_filt = self.temp_df.iloc[~filter_times]
self.dens_df_filt = self.dens_df.iloc[~filter_times]
return None
elif method == 'median':
# Get the median filter from scipy.
from scipy.signal import medfilt
# Identify all the nonzero points since we don't care about zeros.
#te_nonzero = self.temp_df != 0
#ne_nonzero = self.dens_df != 0
# Perform a median filter on the data.
#te_filt = medfilt(self.temp_df[te_nonzero], window_len)
#ne_filt = medfilt(self.dens_df[ne_nonzero], window_len)
te_filt = medfilt(self.temp_df, window_len)
ne_filt = medfilt(self.dens_df, window_len)
# Plot all the chords if you want.
if plot_it:
num_col = 5
num_rows = int(np.ceil(len(self.temp_df.columns) / num_col))
for df, filt in [(self.temp_df, te_filt), (self.dens_df, ne_filt)]:
fig, axs = plt.subplots(num_rows, num_col, sharex=True)
axs = axs.flatten()
for chord in df.columns:
axs[chord].plot(df.index, df[chord], '-k.')
axs[chord].plot(df.index, filt[:, chord], '-r.')
if chord % num_col == 0:
if df.equals(self.temp_df):
axs[chord].set_ylabel('Te (eV)')
elif df.equals(self.dens_df):
axs[chord].set_ylabel('ne (m-3)')
if (num_rows * num_col - chord) < num_col:
axs[chord].set_xlabel('Time (ms)')
fig.tight_layout()
fig.show()
# Store the filtered data.
#self.temp_df_filt = self.temp_df.replace(te_filt)
#self.dens_df_filt = self.dens_df.replace(ne_filt)
elif method in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
def smooth(x, window_len=11, window='hanning'):
"""
                This smoothing algorithm is taken from the scipy cookbook:
scipy-cookbook.readthedocs.io/items/SignalSmooth.html
See there for an explanation, but essentially it just smooths
the data.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
                if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
                    raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]
#print(len(s))
if window == 'flat': #moving average
w = np.ones(window_len, 'd')
else:
                    w = getattr(np, window)(window_len)  # e.g. np.hanning(window_len)
y = np.convolve(w / w.sum(), s, mode='valid')
#return y
return y[int(window_len / 2):-int(window_len / 2)]
te_filt = np.zeros(self.temp_df.values.shape)
ne_filt = np.zeros(self.temp_df.values.shape)
for chord in range(0, len(self.temp_df.columns)):
chord_te_filt = smooth(self.temp_df[chord].values, window_len, method)
chord_ne_filt = smooth(self.dens_df[chord].values, window_len, method)
te_filt[:, chord] = chord_te_filt
ne_filt[:, chord] = chord_ne_filt
# This method applies a rolling average and then a rolling median to
# filter out ELMs (or just really smooth it).
elif method == 'average_median':
# Transpose just so each row is the time series data. Will transpose
# back at the end.
te_filt = np.zeros(self.temp_df.values.shape).T
ne_filt = np.zeros(self.temp_df.values.shape).T
times = self.temp_df.index.values
for chord in range(0, te_filt.shape[0]):
# First calculate the rolling average.
roll_avg_te = np.zeros(len(times))
roll_avg_ne = np.zeros(len(times))
for i in range(0, len(times)):
if (i < window_len) or len(times) - i < window_len:
te_point = self.temp_df[chord].values[i]
ne_point = self.dens_df[chord].values[i]
else:
te_point = np.average(self.temp_df[chord].values[i-int(window_len/2):i+int(window_len/2)])
ne_point = np.average(self.dens_df[chord].values[i-int(window_len/2):i+int(window_len/2)])
# Put into array.
roll_avg_te[i] = te_point
roll_avg_ne[i] = ne_point
# Then same thing, just do median of the rolling average array.
roll_med_te = np.zeros(len(times))
roll_med_ne = np.zeros(len(times))
for i in range(0, len(times)):
if (i < window_len) or len(times) - i < window_len:
te_point = self.temp_df[chord].values[i]
ne_point = self.dens_df[chord].values[i]
else:
te_point = np.median(roll_avg_te[i-int(window_len/2):i+int(window_len/2)])
ne_point = np.median(roll_avg_ne[i-int(window_len/2):i+int(window_len/2)])
# Put into array.
roll_med_te[i] = te_point
roll_med_ne[i] = ne_point
# Finally put the filtered data for this chord into the filtered array.
te_filt[chord] = roll_med_te
ne_filt[chord] = roll_med_ne
# Don't forget to transpose the data again.
te_filt = te_filt.T
ne_filt = ne_filt.T
elif method == 'median_average':
# Transpose just so each row is the time series data. Will transpose
# back at the end.
te_filt = np.zeros(self.temp_df.values.shape).T
ne_filt = np.zeros(self.temp_df.values.shape).T
times = self.temp_df.index.values
for chord in range(0, te_filt.shape[0]):
# First calculate the rolling average.
roll_avg_te = np.zeros(len(times))
roll_avg_ne = np.zeros(len(times))
for i in range(0, len(times)):
if (i < window_len) or (len(times) - i < window_len):
te_point = self.temp_df[chord].values[i]
ne_point = self.dens_df[chord].values[i]
else:
te_point = np.median(self.temp_df[chord].values[i-int(window_len/2):i+int(window_len/2)+1])
ne_point = np.median(self.dens_df[chord].values[i-int(window_len/2):i+int(window_len/2)+1])
# Put into array.
roll_avg_te[i] = te_point
roll_avg_ne[i] = ne_point
# Then same thing, just do median of the rolling average array.
roll_med_te = np.zeros(len(times))
roll_med_ne = np.zeros(len(times))
for i in range(0, len(times)):
if (i < window_len) or (len(times) - i < window_len):
te_point = self.temp_df[chord].values[i]
ne_point = self.dens_df[chord].values[i]
else:
te_point = np.average(roll_avg_te[i-int(window_len/2):i+int(window_len/2)+1])
ne_point = np.average(roll_avg_ne[i-int(window_len/2):i+int(window_len/2)+1])
# Put into array.
roll_med_te[i] = te_point
roll_med_ne[i] = ne_point
# Finally put the filtered data for this chord into the filtered array.
te_filt[chord] = roll_med_te
ne_filt[chord] = roll_med_ne
# Don't forget to transpose the data again.
te_filt = te_filt.T
ne_filt = ne_filt.T
# Store the filtered data.
self.temp_df_filt = pd.DataFrame(te_filt, columns=self.temp_df.columns, index=self.temp_df.index)
self.dens_df_filt = | pd.DataFrame(ne_filt, columns=self.temp_df.columns, index=self.temp_df.index) | pandas.DataFrame |
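# Hedged sketch of the X-point mapping described in map_to_efit above: each
# chord position is written in polar coordinates (d, theta) about the X-point
# of its own time slice and then re-expressed as (R, Z) about the reference
# X-point. All numbers below are invented for illustration only.
import numpy as np

rs = np.array([1.48, 1.49, 1.50])     # hypothetical chord R locations (m)
zs = np.array([-1.10, -1.05, -1.00])  # hypothetical chord Z locations (m)
Rx, Zx = 1.45, -1.20                  # X-point of the current time slice
Rx_ref, Zx_ref = 1.43, -1.21          # X-point of the reference frame
theta = np.arctan2(zs - Zx, rs - Rx)  # angle about the X-point
d = np.hypot(rs - Rx, zs - Zx)        # distance from the X-point
rs_into_ref = d * np.cos(theta) + Rx_ref
zs_into_ref = d * np.sin(theta) + Zx_ref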
import os
from useful_scit.util.make_folders import make_folders
import pandas as pd
#####################################################################
# FILL IN FILEPATHS:
#####################################################################
# fill in path to project location (not including /OAS-DEV)
project_base_path = '/home/ubuntu/mnts/nird/projects/'
# name:
project_name = 'OAS-DEV'
# Fill in path to raw input data from NorESM:
raw_data_path_NorESM = project_base_path + 'model_output/archive/'
# Fill in path to raw input data from EUSAAR
path_eusaar_data = project_base_path + '/EUSAAR_data'
# Output processed data to:
path_outdata = project_base_path + '/Output_data_' + project_name + '/'
#####################################################################
# END FILL IN PART (no need to edit under this line)
#####################################################################
pathdic_raw_data = {'NorESM': raw_data_path_NorESM}
def get_input_datapath(model = 'NorESM', file_source=None):
return pathdic_raw_data[model]
## Plots path:
path_plots = project_base_path + '/Plots_' + project_name + '/'
paths_plotsave= dict(maps=path_plots + 'maps/',
comparison=path_plots + 'global_comparison/',
lineprofiles = path_plots + 'lineprofiles/',
sizedist = path_plots+'sizedistribution/',
sizedist_time = path_plots+'sizedist_time/',
levlat = path_plots+'levlat/',
eusaar = path_plots + 'eusaar/'
)
def get_plotpath(key):
if key in paths_plotsave:
return paths_plotsave[key]
else:
return path_plots+'/'+key + '/'
path_EBAS_data=project_base_path + '/EBAS_data'
# eusaar reformatted data:
path_eusaar_outdata = path_eusaar_data
def get_outdata_base():
return path_outdata
outpaths={}
outpaths['pressure_coords']= path_outdata + '/Fields_pressure_coordinates'
outpaths['original_coords']= path_outdata + '/computed_fields_ng'
outpaths['computed_fields_ng']=path_outdata + '/computed_fields_ng' #native grid computed fields
outpaths['pressure_coords_converstion_fields'] = path_outdata +'/Pressure_coordinates_conversion_fields'
outpaths['pressure_density_path'] =path_outdata + '/Pressure_density'
outpaths['masks'] = path_outdata + '/means/masks/'
outpaths['area_means'] = path_outdata + '/means/area_means/'
outpaths['map_means'] = path_outdata+ '/means/map_means/'
outpaths['levlat_means'] = path_outdata+ '/means/levlat_means/'
outpaths['profile_means'] = path_outdata + '/means/profile_means/'
outpaths['sizedistrib_files'] = path_outdata + '/sizedistrib_files'
outpaths['collocated'] = path_outdata + '/collocated_ds/'
outpaths['eusaar'] = path_outdata + '/eusaar/'
def get_outdata_path(key):
if key in outpaths:
return outpaths[key]
else:
print('WARNING: key not found in outpaths, constants.py')
return path_outdata +'/' + key
make_folders(path_outdata)
# data info
path_data_info = project_base_path + 'OAS-DEV/oas_dev/data_info/'
# output locations:
locations = ['LON_116e_LAT_40n', 'LON_24e_LAT_62n', 'LON_63w_LAT_3s', 'LON_13e_LAT_51n']
path_locations_file = path_data_info+'locations.csv'
if os.path.isfile(path_locations_file):
collocate_locations = pd.read_csv(path_locations_file, index_col=0)
else:
_dic = dict(Hyytiala={'lat': 61.51, 'lon': 24.17},
Melpitz={'lat': 51.32, 'lon': 12.56},
Amazonas={'lat': -3., 'lon': -63.},
Beijing={'lat': 40, 'lon': 116})
collocate_locations = | pd.DataFrame.from_dict(_dic) | pandas.DataFrame.from_dict |
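# Hedged usage sketch of the path helpers defined above (illustration only,
# written as if appended to this constants module; the keys shown are ones
# registered in the dictionaries above, and unknown keys fall back to a path
# built from the key itself).
print(get_plotpath('maps'))           # path_plots + 'maps/'
print(get_outdata_path('eusaar'))     # path_outdata + '/eusaar/'
print(get_outdata_path('not_a_key'))  # prints a warning, then path_outdata + '/not_a_key'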
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python Client API unit tests.
Unit tests for Population and Observation methods in the Data Commons Python
Client API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
from pandas.util.testing import assert_series_equal
from unittest import mock
import datacommons as dc
import datacommons.utils as utils
import pandas as pd
import json
import unittest
import zlib
def post_request_mock(*args, **kwargs):
""" A mock POST requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
# Get the request json and allowed constraining properties
req = kwargs['json']
headers = kwargs['headers']
constrained_props = [
{
'property': 'placeOfBirth',
'value': 'BornInOtherStateInTheUnitedStates'
},
{
'property': 'age',
'value': 'Years5To17'
}
]
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for post requests to get_populations.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_populations']\
and req['population_type'] == 'Person'\
and req['pvs'] == constrained_props:
if req['dcids'] == ['geoId/06085', 'geoId/4805000']:
# Response returned when querying for multiple valid dcids.
res_json = json.dumps([
{
'dcid': 'geoId/06085',
'population': 'dc/p/crgfn8blpvl35'
},
{
'dcid': 'geoId/4805000',
'population': 'dc/p/f3q9whmjwbf36'
}
])
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']:
# Response returned when querying for a dcid that does not exist.
res_json = json.dumps([
{
'dcid': 'geoId/06085',
'population': 'dc/p/crgfn8blpvl35'
},
])
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid'] or req['dcids'] == []:
# Response returned when both given dcids do not exist or no dcids are
# provided to the method.
res_json = json.dumps([])
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_observations
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_observations']\
and req['measured_property'] == 'count'\
and req['stats_type'] == 'measuredValue'\
and req['observation_date'] == '2018-12'\
and req['observation_period'] == 'P1M'\
and req['measurement_method'] == 'BLSSeasonallyAdjusted':
if req['dcids'] == ['dc/p/x6t44d8jd95rd', 'dc/p/lr52m1yr46r44', 'dc/p/fs929fynprzs']:
# Response returned when querying for multiple valid dcids.
res_json = json.dumps([
{
'dcid': 'dc/p/x6t44d8jd95rd',
'observation': '18704962.000000'
},
{
'dcid': 'dc/p/lr52m1yr46r44',
'observation': '3075662.000000'
},
{
'dcid': 'dc/p/fs929fynprzs',
'observation': '1973955.000000'
}
])
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/p/x6t44d8jd95rd', 'dc/MadDcid']:
# Response returned when querying for a dcid that does not exist.
res_json = json.dumps([
{
'dcid': 'dc/p/x6t44d8jd95rd',
'observation': '18704962.000000'
},
])
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid'] or req['dcids'] == []:
# Response returned when both given dcids do not exist or no dcids are
# provided to the method.
res_json = json.dumps([])
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_place_obs
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_place_obs']\
and req['place_type'] == 'City'\
and req['observation_date'] == '2017'\
and req['population_type'] == 'Person'\
and req['pvs'] == constrained_props:
res_json = json.dumps({
'places': [
{
'name': '<NAME>',
'place': 'geoId/4247344',
'populations': {
'dc/p/pq6frs32sfvk': {
'observations': [
{
'marginOfError': 39,
'measuredProp': 'count',
'measuredValue': 67,
}
],
}
}
}
]
})
return MockResponse({
'payload': base64.b64encode(zlib.compress(res_json.encode('utf-8')))
}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
def get_request_mock(*args, **kwargs):
""" A mock GET requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
headers = kwargs['headers']
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for get requests to get_pop_obs.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_pop_obs'] + '?dcid=geoId/06085':
# Response returned when querying for a city in the graph.
res_json = json.dumps({
'name': 'Mountain View',
'placeType': 'City',
'populations': {
'dc/p/013ldrstf6lnf': {
'numConstraints': 6,
'observations': [
{
'marginOfError': 119,
'measuredProp': 'count',
'measuredValue': 225,
'measurementMethod': 'CensusACS5yrSurvey',
'observationDate': '2014'
}, {
'marginOfError': 108,
'measuredProp': 'count',
'measuredValue': 180,
'measurementMethod': 'CensusACS5yrSurvey',
'observationDate': '2012'
}
],
'popType': 'Person',
'propertyValues': {
'age': 'Years16Onwards',
'gender': 'Male',
'income': 'USDollar30000To34999',
'incomeStatus': 'WithIncome',
'race': 'USC_HispanicOrLatinoRace',
'workExperience': 'USC_NotWorkedFullTime'
}
}
}
})
return MockResponse({
'payload': base64.b64encode(zlib.compress(res_json.encode('utf-8')))
}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
class TestGetPopulations(unittest.TestCase):
""" Unit tests for get_populations. """
_constraints = {
'placeOfBirth': 'BornInOtherStateInTheUnitedStates',
'age': 'Years5To17'
}
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_populations with proper dcids returns valid results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Call get_populations
populations = dc.get_populations(['geoId/06085', 'geoId/4805000'], 'Person',
constraining_properties=self._constraints)
self.assertDictEqual(populations, {
'geoId/06085': 'dc/p/crgfn8blpvl35',
'geoId/4805000': 'dc/p/f3q9whmjwbf36'
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_populations with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Call get_populations
pops_1 = dc.get_populations(['geoId/06085', 'dc/MadDcid'], 'Person',
constraining_properties=self._constraints)
pops_2 = dc.get_populations(['dc/MadDcid', 'dc/MadderDcid'], 'Person',
constraining_properties=self._constraints)
# Verify the results
self.assertDictEqual(pops_1, {'geoId/06085': 'dc/p/crgfn8blpvl35'})
self.assertDictEqual(pops_2, {})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_populations with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
pops = dc.get_populations(
[], 'Person', constraining_properties=self._constraints)
self.assertDictEqual(pops, {})
# ---------------------------- PANDAS UNIT TESTS ----------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_multiple_dcids(self, post_mock):
""" Calling get_populations with a Pandas Series and proper dcids returns
a Pandas Series with valid results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get the input and expected output
dcids = pd.Series(['geoId/06085', 'geoId/4805000'])
expected = pd.Series(['dc/p/crgfn8blpvl35', 'dc/p/f3q9whmjwbf36'])
# Call get_populations
actual = dc.get_populations(
dcids, 'Person', constraining_properties=self._constraints)
assert_series_equal(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_bad_dcids(self, post_mock):
""" Calling get_populations with a Pandas Series and dcids that do not exist
returns empty results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get input and expected output
dcids_1 = pd.Series(['geoId/06085', 'dc/MadDcid'])
dcids_2 = pd.Series(['dc/MadDcid', 'dc/MadderDcid'])
expected_1 = pd.Series(['dc/p/crgfn8blpvl35', ''])
expected_2 = pd.Series(['', ''])
# Call get_populations
actual_1 = dc.get_populations(
dcids_1, 'Person', constraining_properties=self._constraints)
actual_2 = dc.get_populations(
dcids_2, 'Person', constraining_properties=self._constraints)
# Assert that the results are correct
assert_series_equal(actual_1, expected_1)
assert_series_equal(actual_2, expected_2)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_no_dcids(self, post_mock):
""" Calling get_populations with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = pd.Series([])
expected = pd.Series([])
# Call get_populations
actual = dc.get_populations(
dcids, 'Person', constraining_properties=self._constraints)
assert_series_equal(actual, expected)
class TestGetObservations(unittest.TestCase):
""" Unit tests for get_observations. """
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_observations with proper dcids returns valid results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['dc/p/x6t44d8jd95rd', 'dc/p/lr52m1yr46r44', 'dc/p/fs929fynprzs']
expected = {
'dc/p/lr52m1yr46r44': 3075662.0,
'dc/p/fs929fynprzs': 1973955.0,
'dc/p/x6t44d8jd95rd': 18704962.0
}
actual = dc.get_observations(dcids, 'count', 'measuredValue', '2018-12',
observation_period='P1M',
measurement_method='BLSSeasonallyAdjusted')
self.assertDictEqual(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_observations with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get the input
dcids_1 = ['dc/p/x6t44d8jd95rd', 'dc/MadDcid']
dcids_2 = ['dc/MadDcid', 'dc/MadderDcid']
# Call get_observations
actual_1 = dc.get_observations(dcids_1, 'count', 'measuredValue', '2018-12',
observation_period='P1M',
measurement_method='BLSSeasonallyAdjusted')
actual_2 = dc.get_observations(dcids_2, 'count', 'measuredValue', '2018-12',
observation_period='P1M',
measurement_method='BLSSeasonallyAdjusted')
# Verify the results
self.assertDictEqual(actual_1, {'dc/p/x6t44d8jd95rd': 18704962.0})
self.assertDictEqual(actual_2, {})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_observations with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
actual = dc.get_observations([], 'count', 'measuredValue', '2018-12',
observation_period='P1M',
measurement_method='BLSSeasonallyAdjusted')
self.assertDictEqual(actual, {})
# ---------------------------- PANDAS UNIT TESTS ----------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_multiple_dcids(self, post_mock):
""" Calling get_observations with a Pandas Series and proper dcids returns
a Pandas Series with valid results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = pd.Series(
['dc/p/x6t44d8jd95rd', 'dc/p/lr52m1yr46r44', 'dc/p/fs929fynprzs'])
expected = | pd.Series([18704962.0, 3075662.0, 1973955.0]) | pandas.Series |
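# Hedged usage sketch of the datacommons calls mocked by the tests above;
# 'YOUR-API-KEY' is a placeholder, and the dcids/constraints mirror the test
# fixtures rather than a verified real-world query.
import datacommons as dc

dc.set_api_key('YOUR-API-KEY')
pops = dc.get_populations(
    ['geoId/06085', 'geoId/4805000'], 'Person',
    constraining_properties={'placeOfBirth': 'BornInOtherStateInTheUnitedStates',
                             'age': 'Years5To17'})
obs = dc.get_observations(
    list(pops.values()), 'count', 'measuredValue', '2018-12',
    observation_period='P1M', measurement_method='BLSSeasonallyAdjusted')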
#!/usr/bin/env python
# coding: utf-8
import yaml
import os, os.path
import requests
import pandas as pd
from app.util import StateAbbrLookup
ASTHMA_YAML_PATH = os.path.join(os.getcwd(), 'data/raw/asthma.yaml')
ASTHMA_CSV_PATH = os.path.join(os.getcwd(), 'data/cleaned/asthma/all.csv')
lookup = StateAbbrLookup()
def download_ala_data():
df = None
with open(ASTHMA_YAML_PATH, 'r') as in_file:
all_states = yaml.safe_load(in_file)
for idx, state_info in enumerate(all_states):
state_abbr = lookup.get_abbr(state_info['state'])
resp = requests.get(state_info['jsonUrl'])
print("Reading %s (%d/%d)" % (state_abbr, idx+1, len(all_states)), end='\r')
if not resp.ok:
print("Bad resp: ", resp.error)
asthma_json = resp.json()
if df is None:
# First time, initialize with columns
keys = [pop['name']
for pop in asthma_json[0]['FormattedPopulations']]
columns = ['State', 'County']
columns.extend(keys)
df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
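# Hedged aside on the pattern above (the snippet ends at the DataFrame
# construction): an empty frame is built from a known column list and rows
# are accumulated afterwards. Column names and values below are invented
# placeholders, not the real ALA fields.
import pandas as pd

columns = ['State', 'County', 'Adult Asthma']  # assumed column names
df = pd.DataFrame(columns=columns)
rows = [{'State': 'CA', 'County': 'Example County', 'Adult Asthma': 12345}]
df = pd.concat([df, pd.DataFrame.from_records(rows)], ignore_index=True)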
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so nothing gets defined and the sentinel
        # values should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
            'dave@google.com',
            'tdhock5@gmail.com',
            'maudelaperriere@gmail.com',
            'rob@gmail.com some text steve@gmail.com',
            'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
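        # The empty string at position 0 produces no match, so subject 0 is
        # absent from the result and the outer index level starts at 1.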
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # an Index should return the same result as the default index without a
        # name, i.e. index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
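        # taking the cross-section at match 0 drops the 'match' level, leaving
        # only the first match per subject so it can be compared with extract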
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
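            # dispatch to the right assertion depending on whether the result
            # is a Series or an Index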
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['<NAME>', '<NAME>'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = | DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']}) | pandas.DataFrame |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
notna,
)
import pandas._testing as tm
def test_expanding_corr(series):
A = series.dropna()
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(series):
result = series.expanding(min_periods=0).count()
tm.assert_almost_equal(
result, series.rolling(window=len(series), min_periods=0).count()
)
def test_expanding_quantile(series):
result = series.expanding().quantile(0.5)
rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(series):
A = series
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_cov_pairwise(frame):
result = frame.expanding().cov()
rolling_result = frame.rolling(window=len(frame), min_periods=1).cov()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(frame):
result = frame.expanding().corr()
rolling_result = frame.rolling(window=len(frame), min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_func(func, static_comp, frame_or_series):
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = getattr(data.expanding(min_periods=1, axis=0), func)()
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[10], static_comp(data[:11]))
else:
tm.assert_series_equal(
result.iloc[10], static_comp(data[:11]), check_names=False
)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_min_periods(func, static_comp):
ser = Series(np.random.randn(50))
result = getattr(ser.expanding(min_periods=30, axis=0), func)()
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
# min_periods is working correctly
result = getattr(ser.expanding(min_periods=15, axis=0), func)()
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = getattr(ser2.expanding(min_periods=5, axis=0), func)()
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = getattr(ser.expanding(min_periods=0, axis=0), func)()
result1 = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result0, result1)
result = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
def test_expanding_apply(engine_and_raw, frame_or_series):
engine, raw = engine_and_raw
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = data.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[9], np.mean(data[:11]))
else:
tm.assert_series_equal(result.iloc[9], np.mean(data[:11]), check_names=False)
def test_expanding_min_periods_apply(engine_and_raw):
engine, raw = engine_and_raw
ser = Series(np.random.randn(50))
result = ser.expanding(min_periods=30).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
# min_periods is working correctly
result = ser.expanding(min_periods=15).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = ser2.expanding(min_periods=5).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = ser.expanding(min_periods=0).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
result1 = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result0, result1)
result = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_expanding_apply_consistency_sum_no_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
mean_x = x.expanding(min_periods=min_periods).mean()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if ddof == 1:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_std(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_cov(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
cov_x_x = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if isinstance(x, Series):
var_x_plus_y = (x + x).expanding(min_periods=min_periods).var(ddof=ddof)
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
var_y = x.expanding(min_periods=min_periods).var(ddof=ddof)
cov_x_y = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.expanding(min_periods=min_periods).corr(x)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
std_y = x.expanding(min_periods=min_periods).std(ddof=ddof)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if ddof == 0:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.expanding(min_periods=min_periods).mean()
mean_y = x.expanding(min_periods=min_periods).mean()
mean_x_times_y = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_mean(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
result = x.expanding(min_periods=min_periods).mean()
expected = (
x.expanding(min_periods=min_periods).sum()
/ x.expanding(min_periods=min_periods).count()
)
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_constant(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding().count()
mean_x = x.expanding(min_periods=min_periods).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.expanding(min_periods=min_periods).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_var_debiasing_factors(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
# check variance debiasing factors
var_unbiased_x = x.expanding(min_periods=min_periods).var()
var_biased_x = x.expanding(min_periods=min_periods).var(ddof=0)
var_debiasing_factors_x = x.expanding().count() / (
x.expanding().count() - 1.0
).replace(0.0, np.nan)
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),
],
)
def test_moment_functions_zero_length_pairwise(f):
df1 = DataFrame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
)
df2_expected = DataFrame(
index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
columns=Index(["a"], name="foo"),
dtype="float64",
)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(x, pairwise=False),
lambda x: x.expanding(min_periods=5).corr(x, pairwise=False),
lambda x: x.expanding(min_periods=5).max(),
lambda x: x.expanding(min_periods=5).min(),
lambda x: x.expanding(min_periods=5).sum(),
lambda x: x.expanding(min_periods=5).mean(),
lambda x: x.expanding(min_periods=5).std(),
lambda x: x.expanding(min_periods=5).var(),
lambda x: x.expanding(min_periods=5).skew(),
lambda x: x.expanding(min_periods=5).kurt(),
lambda x: x.expanding(min_periods=5).quantile(0.5),
lambda x: x.expanding(min_periods=5).median(),
lambda x: x.expanding(min_periods=5).apply(sum, raw=False),
lambda x: x.expanding(min_periods=5).apply(sum, raw=True),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
| tm.assert_frame_equal(df1_result, df1_expected) | pandas._testing.assert_frame_equal |
"""Thermal grid models module."""
import itertools
from multimethod import multimethod
import numpy as np
import pandas as pd
import scipy.constants
import scipy.sparse as sp
import scipy.sparse.linalg
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.der_models
import mesmo.solutions
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class ThermalGridModel(mesmo.utils.ObjectBase):
"""Thermal grid model object."""
timesteps: pd.Index
node_names: pd.Index
line_names: pd.Index
der_names: pd.Index
der_types: pd.Index
nodes: pd.Index
branches: pd.Index
branch_loops: pd.Index
ders: pd.Index
branch_incidence_1_matrix: sp.spmatrix
branch_incidence_2_matrix: sp.spmatrix
branch_incidence_matrix: sp.spmatrix
branch_incidence_matrix_no_source_no_loop: sp.spmatrix
branch_incidence_matrix_no_source_loop: sp.spmatrix
branch_loop_incidence_matrix: sp.spmatrix
der_node_incidence_matrix: sp.spmatrix
der_thermal_power_vector_reference: np.ndarray
branch_flow_vector_reference: np.ndarray
node_head_vector_reference: np.ndarray
# TODO: Revise / reduce use of parameter attributes if possible.
line_parameters: pd.DataFrame
energy_transfer_station_head_loss: float
enthalpy_difference_distribution_water: float
distribution_pump_efficiency: float
source_der_model: mesmo.der_models.DERModel
plant_efficiency: float
def __init__(self, scenario_name: str):
# Obtain thermal grid data.
thermal_grid_data = mesmo.data_interface.ThermalGridData(scenario_name)
# Obtain index set for time steps.
# - This is needed for optimization problem definitions within linear thermal grid models.
self.timesteps = thermal_grid_data.scenario_data.timesteps
# Obtain node / line / DER names.
self.node_names = pd.Index(thermal_grid_data.thermal_grid_nodes["node_name"])
self.line_names = pd.Index(thermal_grid_data.thermal_grid_lines["line_name"])
self.der_names = pd.Index(thermal_grid_data.thermal_grid_ders["der_name"])
self.der_types = pd.Index(thermal_grid_data.thermal_grid_ders["der_type"]).unique()
# Obtain node / branch / DER index set.
nodes = pd.concat(
[
thermal_grid_data.thermal_grid_nodes.loc[:, "node_name"]
.apply(
# Obtain `node_type` column.
lambda value: "source"
if value == thermal_grid_data.thermal_grid.at["source_node_name"]
else "no_source"
)
.rename("node_type"),
thermal_grid_data.thermal_grid_nodes.loc[:, "node_name"],
],
axis="columns",
)
self.nodes = pd.MultiIndex.from_frame(nodes)
self.branches = pd.MultiIndex.from_product([self.line_names, ["no_loop"]], names=["branch_name", "loop_type"])
self.branch_loops = pd.MultiIndex.from_tuples([], names=["loop_id", "branch_name"]) # Values are filled below.
self.ders = | pd.MultiIndex.from_frame(thermal_grid_data.thermal_grid_ders[["der_type", "der_name"]]) | pandas.MultiIndex.from_frame |
'''
Created on 17 Nov 2017
@author: husensofteng
'''
import matplotlib
matplotlib.use('Agg')
from matplotlib.pyplot import tight_layout
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sys
import pandas as pd
import numpy as np
import seaborn as sns
import psycopg2
sns.set_style("white")
#from multiprocessing import Pool
import multiprocessing as mp
from pybedtools import BedTool
#plt.style.use('ggplot')
#sns.set_context("paper")#talk
#plt.style.use('seaborn-ticks')
params = {'-sep': '\t',
'-cols_to_retrieve':'chr, motifstart, motifend, strand, name, score, pval, fscore, chromhmm, contactingdomain, dnase__seq, fantom, loopdomain, numothertfbinding, othertfbinding, replidomain, tfbinding, tfexpr', '-number_rows_select':'all',
'-restart_conn_after_n_queries':100000, '-variants':True, '-regions':True,
'-chr':0, '-start':1, '-end':2, '-ref':3, '-alt':4,
'-db_name':'funmotifsdbtest', '-db_host':'localhost', '-db_port':5432, '-db_user':'huum', '-db_password':'',
'-all_motifs':True, '-motifs_tfbining':False, '-max_score_motif':False, '-motifs_tfbinding_otherwise_max_score_motif':False,
'-verbose': True}
def get_params(params_list, params_without_value):
global params
for i, arg in enumerate(params_list):#priority is for the command line
if arg.startswith('-'):
if arg in params_without_value:
params[arg] = True
else:
try:
v = params_list[i+1]
if v.lower()=='yes' or v.lower()=='true':
v=True
elif v.lower()=='no' or v.lower()=='false':
v=False
params[arg] = v
except IndexError:
print("no value is given for parameter: ", arg )
return params
def open_connection():
conn = psycopg2.connect("dbname={} user={} password={} host={} port={}".format(params['-db_name'], params['-db_user'], params['-db_password'], params['-db_host'], params['-db_port']))
return conn
def plot_motif_freq(tfs, tissue_tables, motifs_table, min_fscore, fig_name):
conn = open_connection()
curs = conn.cursor()
fig = plt.figure(figsize=(6*len(tissue_tables),10))
gs = gridspec.GridSpec(len(tissue_tables), 1, wspace=0.0, hspace=0.0)#height_ratios=[4,2], width_ratios=[4,2], wspace=0.0, hspace=0.0)#create 4 rows and three columns with the given ratio for each
for i,tissue_table in enumerate(tissue_tables):
tfs_freq = []
for tf_name in tfs:
stmt_all = "select count({tissue}.mid) from {motifs},{tissue} where {motifs}.mid={tissue}.mid and {motifs}.name like '%{tf_name}%'".format(motifs=motifs_table, tissue=tissue_table, tf_name=tf_name)
print(stmt_all)
stmt_tfbinding = "select count({tissue}.mid) from {motifs},{tissue} where {motifs}.mid={tissue}.mid and {motifs}.name like '%{tf_name}%' and ({tissue}.tfbinding>0 and {tissue}.tfbinding!='NaN')".format(motifs=motifs_table, tissue=tissue_table,tf_name=tf_name)
stmt_dnase = "select count({tissue}.mid) from {motifs},{tissue} where {motifs}.mid={tissue}.mid and {motifs}.name like '%{tf_name}%' and ({tissue}.dnase__seq>0 and {tissue}.dnase__seq!='NaN')".format(motifs=motifs_table, tissue=tissue_table, tf_name=tf_name)
stmt_active = "select count({tissue}.mid) from {motifs},{tissue} where {motifs}.mid={tissue}.mid and tfexpr>0 and {motifs}.name like '%{tf_name}%' and ((fscore>{min_fscore} and dnase__seq>0 and dnase__seq!='NaN' and (tfbinding>0 or {tissue}.tfbinding='NaN')) or (tfbinding>0 and {tissue}.tfbinding!='NaN' and {tissue}.dnase__seq>0))".format(motifs=motifs_table, tissue=tissue_table, tf_name=tf_name, min_fscore=min_fscore)
curs.execute(stmt_all)
motifs_all = curs.fetchall()
curs.execute(stmt_tfbinding)
tfbinding = curs.fetchall()
curs.execute(stmt_dnase)
dnase = curs.fetchall()
curs.execute(stmt_active)
active = curs.fetchall()
tfs_freq.extend([[tf_name, tissue_table, 'All Motifs', int(motifs_all[0][0])],
[tf_name, tissue_table, 'DHSs', int(dnase[0][0])],
[tf_name, tissue_table, 'Matching TFBSs', int(tfbinding[0][0])],
[tf_name, tissue_table, 'Functional Motifs', int(active[0][0])]])
df = pd.DataFrame(tfs_freq, columns = ['TFs', 'Tissue', 'Activity', 'Number of motifs'])
#df['Number of motifs'] = df['Number of motifs'].apply(np.log2)
ax = fig.add_subplot(gs[i, 0])
s = sns.barplot(x='TFs', y='Number of motifs', hue='Activity', data=df, estimator=sum, ax=ax)
s.set(ylabel='Number of motifs (log2)', xlabel='')
ax.legend_.remove()
sns.despine(right=True, top=True, bottom=False, left=False)
curs.close()
plt.legend(bbox_to_anchor=(1, 1), loc=4)
gs.tight_layout(fig, pad=1, h_pad=2.0, w_pad=0.0)
plt.savefig(fig_name+'.pdf')
plt.savefig(fig_name+'.svg')
plt.close()
def plot_fscore(tf_name, tissue_table, motifs_table, tissue_names, fig_name):
conn = open_connection()
curs = conn.cursor()
stmt_all = "select {tissue_names} from {motifs},{tissue} where {motifs}.mid={tissue}.mid and {motifs}.name like '%{tf_name}%'".format(
tissue_names=','.join(sorted(tissue_names)), motifs=motifs_table, tissue=tissue_table, tf_name=tf_name)
print(stmt_all)
curs.execute(stmt_all)
scores_all = curs.fetchall()
curs.close()
df = pd.DataFrame(scores_all, columns=tissue_names)
s = sns.boxplot(data=df, color='grey')
ss = s.get_figure()
ss.savefig(fig_name+'.pdf', bbox_inches='tight')
ss.savefig(fig_name+'.svg', bbox_inches='tight')
return
def plot_fscore_all(ax, table_name, motifs_table, tissue_names, fig_name):
conn = open_connection()
curs = conn.cursor()
stmt_all = "select {tissue_names} from {table_name},{motifs} where {motifs}.mid={table_name}.mid and {motifs}.chr=1".format(
tissue_names=','.join(tissue_names), table_name=table_name, motifs=motifs_table)
print(stmt_all)
curs.execute(stmt_all)
scores_all = curs.fetchall()
curs.close()
df = pd.DataFrame(scores_all, columns=tissue_names)
print(df.head())
sns.swarmplot(data=df, ax=ax, color='grey', linewidth=0.5)
sns.despine(right=True, top=True, bottom=False, left=False)
ax.set_xlabel('')
ax.set_ylabel('Functionality Scores')
ax.set_ylim(0,5)
return
def plot_fscore_all_selected_tfs(ax, table_name, motifs_table, tissue_names, tfs, fig_name):
conn = open_connection()
curs = conn.cursor()
scores_all = []
for tf in tfs:
stmt_all = "select {tissue_names} from {table_name},{motifs} where {motifs}.mid={table_name}.mid and {motifs}.name like '%{tf}%'".format(
tissue_names=','.join(tissue_names), table_name=table_name, motifs=motifs_table, tf=tf)
print(stmt_all)
curs.execute(stmt_all)
tf_scores = curs.fetchall()
tf_scores_list = pd.DataFrame(tf_scores, columns=tissue_names).stack().tolist()
print(len(tf_scores_list))
scores_all.append(tf_scores_list)
curs.close()
print(len(scores_all))
sns.swarmplot(data=scores_all, color='grey', ax=ax, linewidth=0.5)
ax.set_xticklabels(tfs)
ax.set_xlabel('')
ax.set_ylabel('Functionality Scores')
ax.set_ylim(0,5)
sns.despine(right=True, top=True, bottom=False, left=False)
def plot_fscores_myloid(ax, table_name, fig_name):
conn = open_connection()
curs = conn.cursor()
scores_all = []
stmts_boundmotifs = "select fscore from {table_name} where tfbinding>0 and tfbinding!='NaN' and dnase__seq>0".format(
table_name=table_name)
stmts_unboundmotifs = "select fscore from {table_name} where tfbinding=0 and tfbinding!='NaN'".format(
table_name=table_name)
print(stmts_boundmotifs)
curs.execute(stmts_boundmotifs)
fscores_boundmotifs = curs.fetchall()
fscores_boundmotifs_list = pd.DataFrame(fscores_boundmotifs, columns=['fscore']).stack().tolist()
print(len(fscores_boundmotifs_list))
scores_all.append(fscores_boundmotifs_list)
print(stmts_unboundmotifs)
curs.execute(stmts_unboundmotifs)
fscores_unboundmotifs = curs.fetchall()
fscores_boundmotifs_list = pd.DataFrame(fscores_unboundmotifs, columns=['fscore']).stack().tolist()
scores_all.append(fscores_boundmotifs_list)
curs.close()
sns.swarmplot(data=scores_all, color='grey', ax=ax, linewidth=0.5)
ax.set_xticklabels(['Bound motifs', 'Unbound motifs'])
ax.set_xlabel('')
ax.set_ylabel('Functionality Scores')
ax.set_ylim(0,5)
sns.despine(right=True, top=True, bottom=False, left=False)
def plot_heatmap(motifs_table,tissue_table, fig_name, threshold_to_include_tf, otherconditions):
conn = open_connection()
curs = conn.cursor()
stmt_all = "select chromhmm, upper(split_part(name,'_', 1)), count(name) from {motifs},{tissue} where {motifs}.mid={tissue}.mid {otherconditions} group by chromhmm,name order by chromhmm".format(
motifs=motifs_table, tissue=tissue_table, otherconditions=otherconditions)
print(stmt_all)
curs.execute(stmt_all)
scores_all = curs.fetchall()
curs.close()
df = pd.DataFrame(scores_all, columns=['Chromatin States', 'TFs', 'Frequency'])
df_pivot = df.pivot('Chromatin States', 'TFs', 'Frequency')
df_pivot_filtered = pd.DataFrame()
for c in df_pivot.columns:
if df_pivot[c].sum()>threshold_to_include_tf:
df_pivot_filtered[c] = df_pivot[c]
print(df_pivot_filtered.head())
if len(df_pivot_filtered)>0:
s = sns.heatmap(data=df_pivot_filtered, square=True, cbar=True)
ss = s.get_figure()
ss.savefig(fig_name+'.pdf', bbox_inches='tight')
ss.savefig(fig_name+'.svg', bbox_inches='tight')
def plot_scatter_plot(motifs_table, tissue_tables, otherconditions, figname):
conn = open_connection()
curs = conn.cursor()
dfs = []
for tissue_table in tissue_tables:
stmt_all = "select upper(split_part(name,'_', 1)), count(name) as freq from {motifs},{tissue} where {motifs}.mid={tissue}.mid {otherconditions} group by name order by freq desc".format(
motifs=motifs_table, tissue=tissue_table, otherconditions=otherconditions)
print(stmt_all)
curs.execute(stmt_all)
scores_all = curs.fetchall()
df = pd.DataFrame(scores_all, columns=['TFs', 'Number of Functional Motifs per TF'])
df['Tissue']=[tissue_table for i in range(0,len(df))]
dfs.append(df)
curs.close()
all_dfs = pd.concat(dfs)
print(all_dfs.head())
fig = plt.figure(figsize=(13,8))
s = sns.stripplot(x='Tissue', y='Number of Functional Motifs per TF', data=all_dfs, jitter=True)
sns.despine(right=True, top=True, bottom=True, left=False)
s.set(xlabel='', ylabel='Number of motifs', ylim=(0,70000))
for i, r in all_dfs.iterrows():
if r['Number of Functional Motifs per TF']>=30000:
print(r)
print((r['TFs'], (tissue_tables.index(r['Tissue']),r['Number of Functional Motifs per TF']),
(tissue_tables.index(r['Tissue']), r['Number of Functional Motifs per TF']+20)))
s.annotate(r['TFs'], xy=(tissue_tables.index(r['Tissue']),r['Number of Functional Motifs per TF']),
xytext=(tissue_tables.index(r['Tissue']), r['Number of Functional Motifs per TF']+4000),rotation=45)
ss = s.get_figure()
ss.savefig(figname + '.pdf', bbox_inches='tight')
ss.savefig(figname + '.svg', bbox_inches='tight')
return df
def run_query(query_stmt, tissue_table, cols):
conn = open_connection()
curs = conn.cursor()
curs.execute(query_stmt)
query_results = curs.fetchall()
df = | pd.DataFrame(query_results, columns=cols) | pandas.DataFrame |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = | Series(data) | pandas.Series |
# Data Science with SQL Server Quick Start Guide
# Chapter 03
# This is a comment
print("Hello World!")
# This line is ignored - it is a comment again
print('Another string.')
print('O"Brien') # In-line comment
print("O'Brien")
# Simple expressions
3 + 2
print("The result of 5 + 30 / 6 is:", 5 + 30 / 6)
10 * 3 - 6
11 % 4
print("Is 8 less or equal to 5?", 8 <= 5)
print("Is 8 greater than 5?", 8 > 5)
# Integer
a = 3
b = 4
a ** b
# Float
c = 6.0
d = float(7)
print(c, d)
# Formatted strings
# Variables in print()
e = "repeat"
f = 5
print("Let's %s string formatting %d times." % (e, f))
# String.format()
four_par = "String {} {} {} {}"
print(four_par.format(1, 2, 3, 4))
print(four_par.format('a', 'b', 'c', 'd'))
# More strings
print("""Three double quotes
are needed to delimit strings in multiple lines.
You can have as many lines as you wish.""")
a = "I am 5'11\" tall"
b = 'I am 5\'11" tall'
print("\t" + a + "\n\t" + b)
# Functions
def nopar():
print("No parameters")
def add(a, b):
return a + b
# Call without arguments
nopar()
# Call with variables and math
a = 10
b = 20
add(a / 5, b / 4)
# if..elif..else
a = 10
b = 20
c = 30
if a > b:
print("a > b")
elif a > c:
print("a > c")
elif (b < c):
print("b < c")
if a < c:
print("a < c")
if b in range(10, 30):
print("b is between a and c")
else:
print("a is less than b and less than c")
# List and loops
animals = ["bee", "whale", "cow"]
nums = []
for animal in animals:
print("Animal: ", animal)
for i in range(2, 5):
nums.append(i)
print(nums)
i = 1
while i <= 3:
print(i)
i = i + 1
# Dictionary
CtyCou = {
"Paris": "France",
"Tokyo": "Japan",
"Lagos": "Nigeria"}
for city, country in CtyCou.items():
print("{0} is in {1}.".format(city, country))
# Demo graphics
# Imports
import numpy as np
import pandas as pd
import pyodbc
import matplotlib.pyplot as plt
# Reading the data from SQL Server
con = pyodbc.connect('DSN=AWDW;UID=RUser;PWD=<PASSWORD>')
query = """SELECT CustomerKey,
Age, YearlyIncome,
CommuteDistance, BikeBuyer
FROM dbo.vTargetMail;"""
TM = | pd.read_sql(query, con) | pandas.read_sql |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import random
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
from tqdm import tqdm
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from nltk import word_tokenize
stopwords = stopwords.words('russian')
def genFeatures(x):
x["activation_weekday"] = x["activation_date"].dt.weekday
x["monthday"] = x["activation_date"].dt.day
x["weekinmonday"] = x["monthday"] // 7
##################Added in set 1 - 0.01 Improvement
    x['price_new'] = np.log1p(x.price)  # log transform improves correlation with deal_probability
x['count_null_in_row'] = x.isnull().sum(axis=1)# works
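    # NOTE: despite the "has_" prefix, the flags below are 1 when the field is
    # MISSING (isnull) and 0 when it is present.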
x['has_description'] = x.description.isnull().astype(int)
x['has_image'] = x.image.isnull().astype(int)
x['has_image_top'] = x.image_top_1.isnull().astype(int)
x['has_param1'] = x.param_1.isnull().astype(int)
x['has_param2'] = x.param_2.isnull().astype(int)
x['has_param3'] = x.param_3.isnull().astype(int)
x['has_price'] = x.price.isnull().astype(int)
#################Added in set 2 - 0.00x Improvement
x["description"].fillna("NA", inplace=True)
x["desc_nwords"] = x["description"].apply(lambda x: len(x.split()))
x['len_description'] = x['description'].apply(lambda x: len(x))
x["title_nwords"] = x["title"].apply(lambda x: len(x.split()))
x['len_title'] = x['title'].apply(lambda x: len(x))
x['params'] = x['param_1'].fillna('') + ' ' + x['param_2'].fillna('') + ' ' + x['param_3'].fillna('')
x['params'] = x['params'].str.strip()
x['len_params'] = x['params'].apply(lambda x: len(x))
x['words_params'] = x['params'].apply(lambda x: len(x.split()))
x['symbol1_count'] = x['description'].str.count('↓')
x['symbol2_count'] = x['description'].str.count('\*')
x['symbol3_count'] = x['description'].str.count('✔')
x['symbol4_count'] = x['description'].str.count('❀')
x['symbol5_count'] = x['description'].str.count('➚')
x['symbol6_count'] = x['description'].str.count('ஜ')
    x['symbol7_count'] = x['description'].str.count('\.')  # escaped: str.count treats the pattern as a regex, so only literal '.' is counted
x['symbol8_count'] = x['description'].str.count('!')
x['symbol9_count'] = x['description'].str.count('\?')
x['symbol10_count'] = x['description'].str.count(' ')
x['symbol11_count'] = x['description'].str.count('-')
x['symbol12_count'] = x['description'].str.count(',')
####################
return x
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
test_df['deal_probability'] = 10.0  # sentinel value so the stacked train+test frame can be split back apart later
############################
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
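# Illustrative example of the cleaning + stemming pipeline above (exact stems
# depend on the installed NLTK Snowball stemmer, so treat this as approximate):
# preprocess_data("Продаю новый iPhone 7, 32 ГБ!") -> "прода нов iphone 7 32 гб"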
train_test = pd.concat((train_df, test_df), axis = 'rows')
## After cleaning => then find intersection
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
train_test["params_clean"]= list(train_test[["params"]].apply(lambda x: preprocess_data(x["params"]), axis=1))
train_test['count_common_words_title_desc'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
train_test['count_common_words_title_params'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['params_clean']).lower().split()))), axis=1)
train_test['count_common_words_params_desc'] = train_test.apply(lambda x: len(set(str(x['params_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
print("Cleaned texts..")
###################
# Count Nouns
import pymorphy2
morph = pymorphy2.MorphAnalyzer(result_type=None)
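# result_type=None makes morph.parse() return plain 5-tuples
# (word, tag, normal_form, score, methods_stack), which is what the positional
# unpacking in lemmatize_pos() below relies on.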
from fastcache import clru_cache as lru_cache
@lru_cache(maxsize=1000000)
def lemmatize_pos(word):
_, tag, norm_form, _, _ = morph.parse(word)[0]
return norm_form, tag.POS
def getPOS(x, pos1 = 'NOUN'):
lemmatized = []
x = clean_text(x)
#x = re.sub(u'[.]', ' ', x)
for s in x.split():
s, pos = lemmatize_pos(s)
if pos != None:
if pos1 in pos:
lemmatized.append(s)
return ' '.join(lemmatized)
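# Illustrative only (actual lemmas depend on the pymorphy2 dictionaries):
# getPOS("Продаю красивый красный диван", 'NOUN') -> "диван"
# getPOS("Продаю красивый красный диван", 'ADJ')  -> "красивый красный"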
train_test['get_nouns_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'NOUN'), axis=1))
train_test['get_nouns_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'NOUN'), axis=1))
train_test['get_adj_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'ADJ'), axis=1))
train_test['get_adj_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'ADJ'), axis=1))
train_test['get_verb_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'VERB'), axis=1))
train_test['get_verb_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'VERB'), axis=1))
# Count digits
def count_digit(x):
x = clean_text(x)
return len(re.findall(r'\b\d+\b', x))
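# e.g. count_digit("продам за 1500 руб, торг") -> 1, since the \b word
# boundaries only match standalone digit groups.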
train_test['count_of_digit_in_title'] = list(train_test.apply(lambda x: count_digit(x['title']), axis=1))
train_test['count_of_digit_in_desc'] = list(train_test.apply(lambda x: count_digit(x['description']), axis=1))
train_test['count_of_digit_in_params'] = list(train_test.apply(lambda x: count_digit(x['params']), axis=1))
## get unicode features
count_unicode = lambda x: len([c for c in x if ord(c) > 1105])
count_distunicode = lambda x: len({c for c in x if ord(c) > 1105})
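# ord('ё') == 1105, so the two lambdas above count the total / distinct
# characters beyond the basic Russian alphabet (mostly decorative symbols,
# emoji and other exotic unicode).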
train_test['count_of_unicode_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_unicode_in_desc'] = list(train_test.apply(lambda x: count_unicode(x['description']), axis=1))
train_test['count_of_distuni_in_title'] = list(train_test.apply(lambda x: count_distunicode(x['title']), axis=1))
train_test['count_of_distuni_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
###
count_caps = lambda x: len([c for c in x if c.isupper()])
train_test['count_caps_in_title'] = list(train_test.apply(lambda x: count_caps(x['title']), axis=1))
train_test['count_caps_in_desc'] = list(train_test.apply(lambda x: count_caps(x['description']), axis=1))
import string
count_punct = lambda x: len([c for c in x if c in string.punctuation])
train_test['count_punct_in_title'] = list(train_test.apply(lambda x: count_punct(x['title']), axis=1))
train_test['count_punct_in_desc'] = list(train_test.apply(lambda x: count_punct(x['description']), axis=1))
print("Computed POS Features and others..")
train_test['count_common_nouns'] = train_test.apply(lambda x: len(set(str(x['get_nouns_title']).lower().split()).intersection(set(str(x['get_nouns_desc']).lower().split()))), axis=1)
train_test['count_common_adj'] = train_test.apply(lambda x: len(set(str(x['get_adj_title']).lower().split()).intersection(set(str(x['get_adj_desc']).lower().split()))), axis=1)
train_test['ratio_of_unicode_in_title'] = train_test['count_of_unicode_in_title'] / train_test['len_title']
train_test['ratio_of_unicode_in_desc'] = train_test['count_of_unicode_in_desc'] / train_test['len_description']
train_test['ratio_of_punct_in_title'] = train_test['count_punct_in_title'] / train_test['len_title']
train_test['ratio_of_punct_in_desc'] = train_test['count_punct_in_desc'] / train_test['len_description']
train_test['ratio_of_cap_in_title'] = train_test['count_caps_in_title'] / train_test['len_title']
train_test['ratio_of_cap_in_desc'] = train_test['count_caps_in_desc'] / train_test['len_description']
train_test['count_nouns_in_title'] = train_test["get_nouns_title"].apply(lambda x: len(x.split()))
train_test['count_nouns_in_desc'] = train_test['get_nouns_desc'].apply(lambda x: len(x.split()))
train_test['count_adj_in_title'] = train_test["get_adj_title"].apply(lambda x: len(x.split()))
train_test['count_adj_in_desc'] = train_test['get_adj_desc'].apply(lambda x: len(x.split()))
train_test['count_verb_title'] = train_test['get_verb_title'].apply(lambda x: len(x.split()))
train_test['count_verb_desc'] = train_test['get_verb_desc'].apply(lambda x: len(x.split()))
train_test['ratio_nouns_in_title'] = train_test["count_nouns_in_title"] / train_test["title_nwords"]
train_test['ratio_nouns_in_desc'] = train_test["count_nouns_in_desc"] / train_test["desc_nwords"]
train_test['ratio_adj_in_title'] = train_test["count_adj_in_title"] / train_test["title_nwords"]
train_test['ratio_adj_in_desc'] = train_test["count_adj_in_desc"] / train_test["desc_nwords"]
train_test['ratio_vrb_in_title'] = train_test["count_verb_title"] / train_test["title_nwords"]
train_test['ratio_vrb_in_desc'] = train_test["count_verb_desc"] / train_test["desc_nwords"]
train_test["title"]= list(train_test[["title"]].apply(lambda x: clean_text(x["title"]), axis=1))
train_test["description"]= list(train_test[["description"]].apply(lambda x: clean_text(x["description"]), axis=1))
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
#######################
### Save
#######################
train_df = train_test.loc[train_test.deal_probability != 10].reset_index(drop = True)
test_df = train_test.loc[train_test.deal_probability == 10].reset_index(drop = True)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
train_df.to_feather('../train_basic_features.pkl')
test_df.to_feather('../test__basic_features.pkl')
#######################
### Label Enc
#######################
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
cat_vars = ["user_id", "region", "city", "parent_category_name", "category_name", "user_type", "param_1", "param_2", "param_3"]
for col in cat_vars:
lbl = LabelEncoder()
lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
train_df.to_feather('../train_basic_features_lblencCats.pkl')
test_df.to_feather('../test__basic_features_lblencCats.pkl')
#######################
### One hots
#######################
train_df=pd.read_feather('../train_basic_features_lblencCats.pkl')
test_df=pd.read_feather('../test__basic_features_lblencCats.pkl')
from sklearn.externals import joblib
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.user_id.values.tolist() + test_df.user_id.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_id_onehot.pkl")
X = le.fit_transform(np.array(train_df.region.values.tolist() + test_df.region.values.tolist()).reshape(-1,1))
joblib.dump(X, "../region_onehot.pkl")
X = le.fit_transform(np.array(train_df.city.values.tolist() + test_df.city.values.tolist()).reshape(-1,1))
joblib.dump(X, "../city_onehot.pkl")
X = le.fit_transform(np.array(train_df.parent_category_name.values.tolist() + test_df.parent_category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../parent_category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.category_name.values.tolist() + test_df.category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.user_type.values.tolist() + test_df.user_type.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_type_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_1.values.tolist() + test_df.param_1.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_1_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_2.values.tolist() + test_df.param_2.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_2_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_3.values.tolist() + test_df.param_3.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_3_onehot.pkl")
train_df.drop(cat_vars, inplace = True, axis = 'columns')
test_df.drop(cat_vars, inplace = True, axis = 'columns')
train_df.to_feather('../train_basic_features_woCats.pkl')
test_df.to_feather('../test__basic_features_woCats.pkl')
#######################
### Tfidf
#######################
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf.pkl")
### TFIDF Vectorizer ###
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, #,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
### TFIDF Vectorizer ###
train_df['get_nouns_title'] = train_df['get_nouns_title'].fillna(' ')
test_df['get_nouns_title'] = test_df['get_nouns_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_title'].values.tolist() + test_df['get_nouns_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_title_tfidf.pkl")
del full_tfidf
print("TDIDF Title Noun..")
### TFIDF Vectorizer ###
train_df['get_nouns_desc'] = train_df['get_nouns_desc'].fillna(' ')
test_df['get_nouns_desc'] = test_df['get_nouns_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_desc'].values.tolist() + test_df['get_nouns_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Noun..")
### TFIDF Vectorizer ###
train_df['get_adj_title'] = train_df['get_adj_title'].fillna(' ')
test_df['get_adj_title'] = test_df['get_adj_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_title'].values.tolist() + test_df['get_adj_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Adj..")
### TFIDF Vectorizer ###
train_df['get_adj_desc'] = train_df['get_adj_desc'].fillna(' ')
test_df['get_adj_desc'] = test_df['get_adj_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_desc'].values.tolist() + test_df['get_adj_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Adj..")
### TFIDF Vectorizer ###
train_df['get_verb_title'] = train_df['get_verb_title'].fillna(' ')
test_df['get_verb_title'] = test_df['get_verb_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_title'].values.tolist() + test_df['get_verb_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Verb..")
### TFIDF Vectorizer ###
train_df['get_verb_desc'] = train_df['get_verb_desc'].fillna(' ')
test_df['get_verb_desc'] = test_df['get_verb_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_desc'].values.tolist() + test_df['get_verb_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Verb..")
###############################
# Sentence to seq
###############################
print('Generate Word Sequences')
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
df = pd.concat((train_df, test_df), axis = 'rows')
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['title'].tolist())
sequences = tokenizer.texts_to_sequences(df['title'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../titleSequences.pkl")
MAX_NUM_OF_WORDS = 10000
TIT_MAX_SEQUENCE_LENGTH = 20
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['params'].tolist())
sequences = tokenizer.texts_to_sequences(df['params'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../paramSequences.pkl")
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['description'].tolist())
sequences = tokenizer.texts_to_sequences(df['description'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../descSequences.pkl")
#######OHC WeekDay
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.activation_weekday.values.tolist() + test_df.activation_weekday.values.tolist()).reshape(-1,1))
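# Note: unlike the one-hot blocks above, this weekday encoding is computed but never
# persisted in the original script. A minimal, assumed follow-up mirroring the other
# dumps (hypothetical file name) would be:
# joblib.dump(X, "../activation_weekday_onehot.pkl")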
################################################
# Cat encoding
################################################
train_df=pd.read_feather('../train_basic_features.pkl')
test_df=pd.read_feather('../test__basic_features.pkl')
def catEncode(train_char, test_char, y, colLst = [], nbag = 10, nfold = 20, minCount = 3, postfix = ''):
train_df = train_char.copy()
test_df = test_char.copy()
if not colLst:
print("Empty ColLst")
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby([c]).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = c + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
# datax[c+'_medshftenc'] = datax['y_median']-med_y
# datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=[c], how='left').fillna(np.mean(y))
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set([c]))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
else:
print("Not Empty ColLst")
data = train_char[colLst].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(colLst).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = '_'.join(colLst) + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
datatst = test_char[colLst].copy()
val_X = val_X.join(datax,on=colLst, how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=colLst, how='left').fillna(np.mean(y))
print(val_X[list(set(datax.columns)-set(colLst))].columns)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set(colLst))]
enc_mat_test += datatst[list(set(datax.columns)-set(colLst))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set(colLst))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
print(train_df.columns)
print(test_df.columns)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
return train_df, test_df
catCols = ['user_id', 'region', 'city', 'parent_category_name',
'category_name', 'user_type']
train_df, test_df = catEncode(train_df[catCols].copy(), test_df[catCols].copy(), train_df.deal_probability.values, nbag = 10, nfold = 10, minCount = 0)
train_df.to_feather('../train_cat_targetenc.pkl')
test_df.to_feather('../test_cat_targetenc.pkl')
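# Illustrative note (not part of the original run): catEncode's colLst branch supports
# joint, interaction-level target encoding. A hedged sketch of such a call, assuming the
# raw frames with deal_probability are still available, would be:
# train_int, test_int = catEncode(train_df[['region', 'category_name']].copy(),
#                                 test_df[['region', 'category_name']].copy(),
#                                 train_df.deal_probability.values,
#                                 colLst=['region', 'category_name'], nbag=5, nfold=5)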
################################################################
# Tfidf - part 2
################################################################
import os; os.environ['OMP_NUM_THREADS'] = '1'
from sklearn.decomposition import TruncatedSVD
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
import time
from typing import List, Dict
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer as Tfidf
from sklearn.model_selection import KFold
from sklearn.externals import joblib
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import gc
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
from sklearn import model_selection
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
stopwords = stopwords.words('russian')
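# Illustrative smoke test of the cleaning + stemming pipeline defined above (output
# depends on the installed NLTK Snowball stemmer and stopword list):
# print(preprocess_data('Продаю новый диван, почти не использовался!'))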
train_per=pd.read_csv('../input/train_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test['params'] = train_test['param_1'].fillna('') + ' ' + train_test['param_2'].fillna('') + ' ' + train_test['param_3'].fillna('')
import re
train_test.drop(['param_1', 'param_2', 'param_3'], axis = 'columns', inplace=True)
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
import re
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf2.pkl")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 10000,max_df=.4,#min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf3.pkl")
del(train_test); gc.collect()
train_per=pd.read_csv('../input/train_active.csv', usecols = ['title'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['title'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test.fillna('NA', inplace=True)
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf2.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf3.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
del(train_test); gc.collect()
###Too slow###
'''
train_per=pd.read_csv('../input/train_active.csv', usecols = ['description'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['description'])#,'title','description'])
train_per.fillna(' ', inplace=True)
test_per.fillna(' ', inplace=True)
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, stop_words = stopwords,#min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf2.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf3.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
'''
##########################################
# 13. Chargram -- too slow
##########################################
from collections import Counter
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
def char_ngrams(s):
s = s.lower()
s = s.replace(u' ', '')
result = Counter()
len_s = len(s)
for n in [3, 4, 5]:
result.update(s[i:i+n] for i in range(len_s - n + 1))
return ' '.join(list(result))
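# Illustrative example of char_ngrams: for the toy string 'ab cde' the space is stripped
# and the distinct character 3/4/5-grams of 'abcde' are returned once each, roughly
# 'abc bcd cde abcd bcde abcde' (order follows Counter insertion order).
# char_ngrams('ab cde')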
data = pd.concat((train_df, test_df), axis = 'rows')
data['param_chargram'] = list(data[['params']].apply(lambda x: char_ngrams(x['params']), axis=1))
data['title_chargram'] = list(data[['title']].apply(lambda x: char_ngrams(x['title']), axis=1))
#data['desc_chargram'] = list(data[['description']].apply(lambda x: char_ngrams(x['description']), axis=1))
#data['count_common_chargram'] = data.apply(lambda x: len(set(str(x['title_chargram']).lower().split()).intersection(set(str(x['desc_chargram']).lower().split()))), axis=1)
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
#####Chargram -TFIDF
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['title_chargram'].values.tolist() + test_df['title_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../title_chargram_tfidf.pkl')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['param_chargram'].values.tolist() + test_df['param_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['param_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['param_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../param_chargram_tfidf.pkl')
#######Chargram of Cat and Parent cat
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
train_df = pd.read_feather('../train_basic_features.pkl')
test_df = pd.read_feather('../test__basic_features.pkl')
data = pd.concat([train_df, test_df], axis= 'rows')
data['categories'] = data["parent_category_name"].fillna(' ') + data["category_name"].fillna(' ')
data['cat_chargram'] = list(data[['categories']].apply(lambda x: char_ngrams(x['categories']), axis=1))
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 1000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['cat_chargram'].values.tolist() + test_df['cat_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['cat_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['cat_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../cat_chargram_tfidf.pkl')
##############################
## New Kaggle Ftr
##############################
import pandas as pd
import gc
used_cols = ['item_id', 'user_id']
train = pd.read_csv('../input/train.csv', usecols=used_cols)
train_active = pd.read_csv('../input/train_active.csv', usecols=used_cols)
test = pd.read_csv('../input/test.csv', usecols=used_cols)
test_active = pd.read_csv('../input/test_active.csv', usecols=used_cols)
train_periods = pd.read_csv('../input/periods_train.csv', parse_dates=['date_from', 'date_to'])
test_periods = pd.read_csv('../input/periods_test.csv', parse_dates=['date_from', 'date_to'])
train.head()
all_samples = pd.concat([
train,
train_active,
test,
test_active
]).reset_index(drop=True)
all_samples.drop_duplicates(['item_id'], inplace=True)
del train_active
del test_active
gc.collect()
all_periods = pd.concat([
train_periods,
test_periods
])
del train_periods
del test_periods
gc.collect()
all_periods.head()
all_periods['days_up'] = (all_periods['date_to'] - all_periods['date_from']).dt.days
gp = all_periods.groupby(['item_id'])[['days_up']]
gp_df = pd.DataFrame()
gp_df['days_up_sum'] = gp.sum()['days_up']
gp_df['times_put_up'] = gp.count()['days_up']
gp_df.reset_index(inplace=True)
gp_df.rename(index=str, columns={'index': 'item_id'})
gp_df.head()
all_periods.drop_duplicates(['item_id'], inplace=True)
all_periods = all_periods.merge(gp_df, on='item_id', how='left')
all_periods.head()
del gp
del gp_df
gc.collect()
all_periods = all_periods.merge(all_samples, on='item_id', how='left')
all_periods.head()
gp = all_periods.groupby(['user_id'])[['days_up_sum', 'times_put_up']].mean().reset_index() \
.rename(index=str, columns={
'days_up_sum': 'avg_days_up_user',
'times_put_up': 'avg_times_up_user'
})
gp.head()
n_user_items = all_samples.groupby(['user_id'])[['item_id']].count().reset_index() \
.rename(index=str, columns={
'item_id': 'n_user_items'
})
gp = gp.merge(n_user_items, on='user_id', how='left')
gp.head()
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.merge(gp, on='user_id', how='left')
test = test.merge(gp, on='user_id', how='left')
agg_cols = list(gp.columns)[1:]
del gp
gc.collect()
train.head()
train = train[['avg_days_up_user','avg_times_up_user','n_user_items']]
test = test[['avg_days_up_user','avg_times_up_user','n_user_items']]
train.to_feather('../train_kag_agg_ftr.ftr')
test.to_feather('../test_kag_agg_ftr.ftr')
def catEncode(train_char, test_char, y, colLst = [], nbag = 10, nfold = 20, minCount = 3, postfix = ''):
train_df = train_char.copy()
test_df = test_char.copy()
if not colLst:
print("Empty ColLst")
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby([c]).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = c + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
# datax[c+'_medshftenc'] = datax['y_median']-med_y
# datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=[c], how='left').fillna(np.mean(y))
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set([c]))]
enc_mat_test = pd.DataFrame(enc_mat_test)
import glob
from astropy.io import ascii
from astropy.table import Table
from astropy.io import fits
from astropy.time import Time
import os
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import pandas as pd
# The Sherpa UI calls used below (set_stat, load_pha, set_source, xstbabs, sample_flux, ...)
# assume this script runs inside a Sherpa session with the XSPEC models available;
# when run as a standalone script the usual import would be:
from sherpa.astro.ui import *
# Create the dataframe to be filled later :
Source_Name_l=[]
BID_l=[]
SID_l=[]
OBS_ID_l=[]
MJD_OBS_l=[]
DATE_OBS_l=[]
exp_l=[]
NH_l=[]
BB_kT_l=[]
min_BBkT_l=[]
max_BBkt_l=[]
BB_Norm_l=[]
min_BBNorm_l=[]
max_BBNorm_l=[]
BB_flux_l=[]
min_BBflux_l=[]
max_BBflux_l=[]
Bol_BBF_l=[]
min_BolBBF_l=[]
max_BolBBF_l=[]
BB2_kT_l=[]
min_2BBkT_l=[]
max_2BBkt_l=[]
BB2_Norm_l=[]
min_2BBNorm_l=[]
max_2BBNorm_l=[]
BB2_flux_l=[]
min_2BBflux_l=[]
max_2BBflux_l=[]
Bol_2BBF_l=[]
min_2BolBBF_l=[]
max_2BolBBF_l=[]
fa_l=[]
min_fa_l=[]
max_fa_l=[]
dbb_kT_l=[]
min_dbbkT_l=[]
max_dbbkT_l=[]
dbb_Norm_l=[]
min_dbbNorm_l=[]
max_dbbNorm_l=[]
dbb_flux_l=[]
min_dbbflux_l=[]
max_dbbflux_l=[]
sBB_kT_l=[]
min_sBBkT_l=[]
max_sBBkt_l=[]
sBB_Norm_l=[]
min_sBBNorm_l=[]
max_sBBNorm_l=[]
sBB_flux_l=[]
min_sBBflux_l=[]
max_sBBflux_l=[]
RStat_l=[]
dof_l=[]
# Enter here the name of the source as in the burster_v3f.dat :
source_name = '4U_1608-522'
bid = input('enter here the burstid of the burst you would like to fit : ')
bid = '3' # hardcoded override of the prompt above for this particular run
mine=input('Enter the minimum energy of the fits :')
mines = ":"+mine
maxe=input('Enter the maximum energy of the fits :')
maxes = maxe+":"
folder = '/home/hea/ownCloud/burst_characterization_v3/'
sfolder = '/home/hea/ownCloud/burst_characterization_v3/scripts/'
#folder = '/Users/tolga/ownCloud/burst_characterization_v3/'
#sfolder = '/Users/tolga/ownCloud/burst_characterization_v3/scripts/'
# Read the persistent state analysis (pre burst tbabs*DISKBB+BBODYRAD fit)
pers_file = folder+source_name+'/pers_results.dat'
pers_data = pd.read_csv(pers_file)
snh = pers_data['NH']
diskbb_temp = pers_data['disk_kT']
diskbb_norm = pers_data['disk_norm']
sbb_kt=pers_data['bb_kT']
sbb_norm=pers_data['bb_norm']
pers_rstat = pers_data['chi']
pers_dof = pers_data['dof']
sel_pre_burst = np.where(pers_data['BID']==int(bid))
ssnh = snh[sel_pre_burst[0]]
sdiskbb_temp = diskbb_temp[sel_pre_burst[0]]
sdiskbb_norm = diskbb_norm[sel_pre_burst[0]]
ssbb_kt = sbb_kt[sel_pre_burst[0]]
ssbb_norm = sbb_norm[sel_pre_burst[0]]
spers_rstat= pers_rstat[sel_pre_burst[0]]
spers_dof = pers_dof[sel_pre_burst[0]]
print('These are the values I get from persistent state analysis :')
print('NH ='+str(ssnh.values[0]))
print('Disk BB kT = '+str(sdiskbb_temp.values[0]))
print('Disk BB Norm = '+str(sdiskbb_norm.values[0]))
print('Surface BB kT = '+str(ssbb_kt.values[0]))
print('Surface BB Norm = '+str(ssbb_norm.values[0]))
print('Reduced Chi2 of = '+str(spers_rstat.values[0]/spers_dof.values[0]))
print('available fit_methods \n')
print('1=fixed background just BB free \n')
#fit_method = input('Please enter your fit preference : ')
fit_method = '1'
burst_folder=folder+source_name+'/burst'+bid+'/'
bkg_folder = folder+source_name+'/burst'+bid+'/pers_analysis/'
bkgfile = glob.glob(bkg_folder+'*3c50*.pha.pi')
sp_list = np.array(glob.glob(burst_folder+'c*.pha'))
pha_count = len(sp_list)
set_stat("chi2xspecvar")
set_covar_opt("sigma",1.0)
set_conf_opt('numcores', 10)
set_conf_opt("max_rstat",250.0)
set_covar_opt('sigma',1.0)
for i in range(len(sp_list)-1):
sp_final = sp_list[i]
print(sp_final)
sp_hdu = fits.open(str(sp_final))
print('read src spec: '+str(sp_final))
mjdobs = sp_hdu[1].header['MJD-OBS']
date_obsi = sp_hdu[1].header['DATE-OBS']
exposure = sp_hdu[1].header['EXPOSURE']
obsid = sp_hdu[1].header['OBS_ID']
sid = sp_final.split('/')[7].split('_')[0][1:]
date_obs = str(Time(date_obsi,format='isot', scale='utc'))
print(date_obs)
print(obsid)
object = sp_hdu[1].header['OBJECT']
Source_Name_l.append(object)
BID_l.append(bid)
SID_l.append(sid)
OBS_ID_l.append(obsid)
MJD_OBS_l.append(mjdobs)
DATE_OBS_l.append(date_obs)
exp_l.append(exposure)
NH_l.append(ssnh.values[0])
if exposure == 0.0 :
print('this spectrum does not have any exposure')
BB_kT_l.append(0)
min_BBkT_l.append(0)
max_BBkt_l.append(0)
BB_Norm_l.append(0)
min_BBNorm_l.append(0)
max_BBNorm_l.append(0)
BB_flux_l.append(0)
min_BBflux_l.append(0)
max_BBflux_l.append(0)
Bol_BBF_l.append(0)
min_BolBBF_l.append(0)
max_BolBBF_l.append(0)
BB2_kT_l.append(0)
min_2BBkT_l.append(0)
max_2BBkt_l.append(0)
BB2_Norm_l.append(0)
min_2BBNorm_l.append(0)
max_2BBNorm_l.append(0)
BB2_flux_l.append(0)
min_2BBflux_l.append(0)
max_2BBflux_l.append(0)
Bol_2BBF_l.append(0)
min_2BolBBF_l.append(0)
max_2BolBBF_l.append(0)
fa_l.append(0)
min_fa_l.append(0)
max_fa_l.append(0)
dbb_kT_l.append(0)
min_dbbkT_l.append(0)
max_dbbkT_l.append(0)
dbb_Norm_l.append(0)
min_dbbNorm_l.append(0)
max_dbbNorm_l.append(0)
dbb_flux_l.append(0)
min_dbbflux_l.append(0)
max_dbbflux_l.append(0)
sBB_kT_l.append(0)
min_sBBkT_l.append(0)
max_sBBkt_l.append(0)
sBB_Norm_l.append(0)
min_sBBNorm_l.append(0)
max_sBBNorm_l.append(0)
sBB_flux_l.append(0)
min_sBBflux_l.append(0)
max_sBBflux_l.append(0)
RStat_l.append(0)
dof_l.append(0)
print('Finished:')
print(sp_final)
continue
# print(date_obsi)
load_pha(1, str(sp_final),use_errors=True)
load_arf(1, sfolder+'nicer-consim135p-teamonly-array52.arf')
load_rmf(1, sfolder+'nicer-rmf6s-teamonly-array52.rmf')
print('Ignoring : '+mines+' '+maxes)
ignore(mines+','+maxes)
print('This script only subtracts ni3c50 background')
load_bkg(1, bkgfile[0],use_errors=True)
subtract()
print('Grouping the data to have at least 50 counts per channel')
group_counts(1, 50)
# first let's do a global source definition :
set_source(xstbabs.tb*(xsdiskbb.dbb+xsbbodyrad.sbb))
tb.nH=ssnh.values[0]
dbb.Tin = sdiskbb_temp.values[0]
dbb.norm = sdiskbb_norm.values[0]
sbb.kT = ssbb_kt.values[0]
sbb.norm = ssbb_norm.values[0]
freeze(tb.nH)
freeze(dbb.Tin)
freeze(dbb.norm)
freeze(sbb.kT)
freeze(sbb.norm)
#fit()
initial_rstat = sum(calc_chisqr())/len(calc_chisqr())
if (initial_rstat < (spers_rstat.values[0]+3.50)/spers_dof.values[0]):
print('Current chi2 : '+str(initial_rstat))
print('Persistent chi2 : '+str(spers_rstat.values[0]/spers_dof.values[0]))
print('Deviation from the persistent emission is small; thawing the persistent parameters and refitting to save the best-fit values:')
thaw(dbb.Tin)
thaw(dbb.norm)
thaw(sbb.kT)
thaw(sbb.norm)
fit()
if get_fit_results().dof <= 0.0:
print('The number of degrees of freedom is too small; skipping this spectrum:')
dbb_kT_l.append(0)
min_dbbkT_l.append(0)
max_dbbkT_l.append(0)
dbb_Norm_l.append(0)
min_dbbNorm_l.append(0)
max_dbbNorm_l.append(0)
dbb_flux_l.append(0)
min_dbbflux_l.append(0)
max_dbbflux_l.append(0)
sBB_kT_l.append(0)
min_sBBkT_l.append(0)
max_sBBkt_l.append(0)
sBB_Norm_l.append(0)
min_sBBNorm_l.append(0)
max_sBBNorm_l.append(0)
sBB_flux_l.append(0)
min_sBBflux_l.append(0)
max_sBBflux_l.append(0)
BB_kT_l.append(0)
min_BBkT_l.append(0)
max_BBkt_l.append(0)
BB_Norm_l.append(0)
min_BBNorm_l.append(0)
max_BBNorm_l.append(0)
BB_flux_l.append(0)
min_BBflux_l.append(0)
max_BBflux_l.append(0)
Bol_BBF_l.append(0)
min_BolBBF_l.append(0)
max_BolBBF_l.append(0)
BB2_kT_l.append(0)
min_2BBkT_l.append(0)
max_2BBkt_l.append(0)
BB2_Norm_l.append(0)
min_2BBNorm_l.append(0)
max_2BBNorm_l.append(0)
BB2_flux_l.append(0)
min_2BBflux_l.append(0)
max_2BBflux_l.append(0)
Bol_2BBF_l.append(0)
min_2BolBBF_l.append(0)
max_2BolBBF_l.append(0)
fa_l.append(0)
min_fa_l.append(0)
max_fa_l.append(0)
RStat_l.append(0)
dof_l.append(get_fit_results().dof)
print('Finished:')
print(sp_final)
continue
covar()
chi = get_fit_results().statval
dof = get_fit_results().dof
parvals = np.array(get_covar_results().parvals)
parnames = np.array(get_covar_results().parnames)
parmins = np.array(get_covar_results().parmins)
parmaxes = np.array(get_covar_results().parmaxes)
covar()
cparmins = np.array(get_covar_results().parmins)
cparmaxes = np.array(get_covar_results().parmaxes)
if (None in cparmins) == True or (None in cparmaxes) == True or (0 in cparmaxes) == True or (0 in cparmins) == True:
print('It seems like there are unconstrained parameters in the continuum model, so the errors cannot be calculated')
print('No flux will be reported')
dbb_flux_l.append(0)
max_dbbflux_l.append(0)
min_dbbflux_l.append(0)
sBB_flux_l.append(0)
max_sBBflux_l.append(0)
min_sBBflux_l.append(0)
print('Parameter Errors will be written as 0')
dbb_kT_l.append(get_fit_results().parvals[0])
min_dbbkT_l.append(0)
max_dbbkT_l.append(0)
dbb_Norm_l.append(get_fit_results().parvals[1])
min_dbbNorm_l.append(0)
max_dbbNorm_l.append(0)
sBB_kT_l.append(get_fit_results().parvals[2])
min_sBBkT_l.append(0)
max_sBBkt_l.append(0)
sBB_Norm_l.append(get_fit_results().parvals[3])
min_sBBNorm_l.append(0)
max_sBBNorm_l.append(0)
#elif ((None in cparmins) == False and (None in cparmaxes) == False) or ((0 in cparmaxes) == False and (0 in cparmins) == False):
else:
print('The parameters are well constrained; calculating errors')
#matrix = get_covar_results().extra_output
#is_all_zero = np. all((matrix > 0))
#if is_all_zero:
sample2=sample_flux(dbb,float(mine),float(maxe), num=100, correlated=True,confidence=68)
dbb_flux_l.append(sample2[1][0])
max_dbbflux_l.append(sample2[1][1]-sample2[1][0])
min_dbbflux_l.append(sample2[1][0]-sample2[1][2])
sample3=sample_flux(sbb,float(mine),float(maxe), num=100, correlated=True,confidence=68)
sBB_flux_l.append(sample3[1][0])
max_sBBflux_l.append(sample3[1][1]-sample3[1][0])
min_sBBflux_l.append(sample3[1][0]-sample3[1][2])
# Parameter errors will be written as they are :
dbb_kT_l.append(get_covar_results().parvals[0])
min_dbbkT_l.append(get_covar_results().parmins[0])
max_dbbkT_l.append(get_covar_results().parmaxes[0])
dbb_Norm_l.append(get_covar_results().parvals[1])
min_dbbNorm_l.append(get_covar_results().parmins[1])
max_dbbNorm_l.append(get_covar_results().parmaxes[1])
sBB_kT_l.append(get_covar_results().parvals[2])
min_sBBkT_l.append(get_covar_results().parmins[2])
max_sBBkt_l.append(get_covar_results().parmaxes[2])
sBB_Norm_l.append(get_covar_results().parvals[3])
min_sBBNorm_l.append(get_covar_results().parmins[3])
max_sBBNorm_l.append(get_covar_results().parmaxes[3])
BB_kT_l.append(0)
min_BBkT_l.append(0)
max_BBkt_l.append(0)
BB_Norm_l.append(0)
min_BBNorm_l.append(0)
max_BBNorm_l.append(0)
BB_flux_l.append(0)
min_BBflux_l.append(0)
max_BBflux_l.append(0)
Bol_BBF_l.append(0)
min_BolBBF_l.append(0)
max_BolBBF_l.append(0)
BB2_kT_l.append(0)
min_2BBkT_l.append(0)
max_2BBkt_l.append(0)
BB2_Norm_l.append(0)
min_2BBNorm_l.append(0)
max_2BBNorm_l.append(0)
BB2_flux_l.append(0)
max_2BBflux_l.append(0)
min_2BBflux_l.append(0)
Bol_2BBF_l.append(0)
min_2BolBBF_l.append(0)
max_2BolBBF_l.append(0)
fa_l.append(0)
min_fa_l.append(0)
max_fa_l.append(0)
#dbb_kT_l.append(0)
#min_dbbkT_l.append(0)
#max_dbbkT_l.append(0)
#dbb_Norm_l.append(0)
#min_dbbNorm_l.append(0)
#max_dbbNorm_l.append(0)
#dbb_flux_l.append(0)
#min_dbbflux_l.append(0)
#max_dbbflux_l.append(0)
#sBB_kT_l.append(0)
#min_sBBkT_l.append(0)
#max_sBBkt_l.append(0)
#sBB_Norm_l.append(0)
#min_sBBNorm_l.append(0)
#max_sBBNorm_l.append(0)
#sBB_flux_l.append(0)
#min_sBBflux_l.append(0)
#max_sBBflux_l.append(0)
RStat_l.append(chi)
dof_l.append(dof)
plot_data()
x_max=max(get_data_plot().x)+max(get_data_plot().x)*0.05
x_min = np.abs(min(get_data_plot().x)-min(get_data_plot().x)*0.05)
ymax = max(get_data_plot().y)+max(get_data_plot().y)*0.2
ymin = np.abs(min(get_data_plot().y)-min(get_data_plot().y)*0.05)
plot_fit_delchi(1,clearwindow=True, color='Black')
fig=plt.gcf()
ax1,ax2=fig.axes
ax1.set_title(source_name+' BID:'+bid+' MJD:'+str(mjdobs)+' SID:'+sid)
ax1.set_yscale('log')
ax1.set_xscale('log')
ax2.set_xscale('log')
ax2.set_xlabel('Energy [keV]', fontsize=14)
ax1.set_ylabel('Counts/sec/keV', fontsize=14)
ax2.set_ylabel('Sigma', fontsize=14)
ax1.set_xlim(x_min,x_max)
ax2.set_xlim(x_min,x_max)
plt.savefig(burst_folder+sid+'_b'+bid+'_'+fit_method+'_full_'+mine+'_'+maxe+'.pdf',orientation='landscape', papertype='a4')
plt.savefig(burst_folder+sid+'_b'+bid+'_'+fit_method+'_full_'+mine+'_'+maxe+'.png',orientation='landscape', papertype='a4')
plot_fit(1,clearwindow=True,xlog=True,ylog=True, color='Black')
plot_model_component("tb*dbb", replot=False, overplot=True, color='Green')
plot_model_component("tb*sbb", replot=False, overplot=True, color='Red')
plt.title(source_name+' BID:'+bid+' MJD:'+str(mjdobs)+' SID:'+sid)
plt.xlabel('Energy [keV]', fontsize=14)
plt.ylabel('Counts/sec/keV', fontsize=14)
plt.xlim(x_min,x_max)
plt.ylim(ymin,ymax)
plt.savefig(burst_folder+sid+'_b'+bid+'_'+fit_method+'_full_comp_'+mine+'_'+maxe+'.pdf',orientation='landscape', papertype='a4')
plt.savefig(burst_folder+sid+'_b'+bid+'_'+fit_method+'_full_comp_'+mine+'_'+maxe+'.png',orientation='landscape', papertype='a4')
print('Done fitting only the continuum model; skipping to the next spectrum')
continue
else:
print('Current chi2 : '+str(initial_rstat))
print('Persistent chi2 : '+str(spers_rstat.values[0]/spers_dof.values[0]))
print('The simple persistent emission model does not fit the data; we need to add components')
print('This is 1=fixed background just BB free')
set_source(xstbabs.tb*(xsbbodyrad.bb+xsdiskbb.dbb+xsbbodyrad.sbb))
tb.nH=ssnh.values[0]
dbb.Tin = sdiskbb_temp.values[0]
dbb.norm = sdiskbb_norm.values[0]
sbb.kT = ssbb_kt.values[0]
sbb.norm = ssbb_norm.values[0]
freeze(tb.nH)
freeze(dbb.Tin)
freeze(dbb.norm)
freeze(sbb.kT)
freeze(sbb.norm)
dbb_kT_l.append(diskbb_temp.values[0])
min_dbbkT_l.append(0)
max_dbbkT_l.append(0)
dbb_Norm_l.append(diskbb_norm.values[0])
min_dbbNorm_l.append(0)
max_dbbNorm_l.append(0)
dbb_flux_l.append(0)
min_dbbflux_l.append(0)
max_dbbflux_l.append(0)
sBB_kT_l.append(ssbb_kt.values[0])
min_sBBkT_l.append(0)
max_sBBkt_l.append(0)
sBB_Norm_l.append(ssbb_norm.values[0])
min_sBBNorm_l.append(0)
max_sBBNorm_l.append(0)
sBB_flux_l.append(0)
min_sBBflux_l.append(0)
max_sBBflux_l.append(0)
fa_l.append(0)
min_fa_l.append(0)
max_fa_l.append(0)
BB2_kT_l.append(0)
min_2BBkT_l.append(0)
max_2BBkt_l.append(0)
BB2_Norm_l.append(0)
min_2BBNorm_l.append(0)
max_2BBNorm_l.append(0)
Bol_2BBF_l.append(0)
max_2BolBBF_l.append(0)
min_2BolBBF_l.append(0)
BB2_flux_l.append(0)
max_2BBflux_l.append(0)
min_2BBflux_l.append(0)
bb.kt=0.5
set_xsabund('wilm')
bb.norm = 180.3
set_method("moncar")
fit()
set_method("levmar")
fit()
chi = get_fit_results().statval
dof = get_fit_results().dof
if get_fit_results().dof <= 0.0:
print('The number of degrees of freedom is too small; skipping this spectrum:')
BB_kT_l.append(0)
min_BBkT_l.append(0)
max_BBkt_l.append(0)
BB_Norm_l.append(0)
min_BBNorm_l.append(0)
max_BBNorm_l.append(0)
BB_flux_l.append(0)
min_BBflux_l.append(0)
max_BBflux_l.append(0)
Bol_BBF_l.append(0)
min_BolBBF_l.append(0)
max_BolBBF_l.append(0)
RStat_l.append(0)
dof_l.append(get_fit_results().dof)
print('Finished:')
print(sp_final)
continue
fit()
covar()
chi = get_fit_results().statval
dof = get_fit_results().dof
RStat_l.append(chi)
dof_l.append(dof)
parvals = np.array(get_covar_results().parvals)
parnames = np.array(get_covar_results().parnames)
parmins = np.array(get_covar_results().parmins)
parmaxes = np.array(get_covar_results().parmaxes)
# now the model unabsorbed fluxes :
covar()
cparmins = np.array(get_covar_results().parmins)
cparmaxes = np.array(get_covar_results().parmaxes)
if (None in cparmins) == True or (None in cparmaxes) == True or (0 in cparmaxes) == True or (0 in cparmins) == True:
print('It seems like there are unconstrained parameters, so errors cannot be calculated')
print('No flux will be reported')
BB_flux_l.append(0)
max_BBflux_l.append(0)
min_BBflux_l.append(0)
Bol_BBF_l.append(0)
min_BolBBF_l.append(0)
max_BolBBF_l.append(0)
# Parameter Errors will be written as 0:
BB_kT_l.append(get_covar_results().parvals[0])
min_BBkT_l.append(0)
max_BBkt_l.append(0)
BB_Norm_l.append(get_covar_results().parvals[1])
min_BBNorm_l.append(0)
max_BBNorm_l.append(0)
else:
#elif ((None in cparmins) == False and (None in cparmaxes) == False) or ((0 in cparmaxes) == False and (0 in cparmins) == False):
print('The parameters are well constrained; calculating errors')
# matrix = get_covar_results().extra_output
# is_all_zero = np. all((matrix > 0))
# if is_all_zero:
sample1=sample_flux(bb,float(mine),float(maxe), num=100, correlated=True,confidence=68)
BB_flux_l.append(sample1[1][0])
max_BBflux_l.append(sample1[1][1]-sample1[1][0])
min_BBflux_l.append(sample1[1][0]-sample1[1][2])
print('Parameter errors will be written as they are')
BB_kT_l.append(get_covar_results().parvals[0])
min_BBkT_l.append(get_covar_results().parmins[0])
max_BBkt_l.append(get_covar_results().parmaxes[0])
BB_Norm_l.append(get_covar_results().parvals[1])
min_BBNorm_l.append(get_covar_results().parmins[1])
max_BBNorm_l.append(get_covar_results().parmaxes[1])
# Now the Bolometric Fluxes :
sample_bol=sample_flux(bb,0.01,200.0, num=100, correlated=True,confidence=68)
Bol_BBF_l.append(sample_bol[1][0])
max_BolBBF_l.append(sample_bol[1][1]-sample_bol[1][0])
min_BolBBF_l.append(sample_bol[1][0]-sample_bol[1][2])
#Bol_BBF_l.append(1.076e-11*((get_covar_results().parvals[0])**4.0)*get_covar_results().parvals[1])
#max_BolBBF_l.append(1.076e-11*((get_covar_results().parvals[0]+get_covar_results().parmaxes[0])**4.0)*(get_covar_results().parvals[1]+get_covar_results().parmaxes[1]))
#min_BolBBF_l.append(1.076e-11*((get_covar_results().parvals[0]-get_covar_results().parmins[0])**4.0)*(get_covar_results().parvals[1]-get_covar_results().parmins[1]))
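# (The commented-out lines above are the analytic bbodyrad bolometric flux,
# F_bol ~ 1.076e-11 * (kT/keV)^4 * norm erg/s/cm^2, kept as a cross-check against the
# sampled bolometric flux that is actually stored.)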
#RStat_l.append(chi)
#dof_l.append(dof)
#else:
# print('It seems like you have unconstrained parameters we cant calculate errors')
# print('No flux will be reported')
# BB_flux_l.append(0)
# max_BBflux_l.append(0)
# min_BBflux_l.append(0)
# Bol_BBF_l.append(0)
# min_BolBBF_l.append(0)
# max_BolBBF_l.append(0)
# # Parameter Errors will be written as 0:
# BB_kT_l.append(get_covar_results().parvals[0])
# min_BBkT_l.append(0)
# max_BBkt_l.append(0)
# BB_Norm_l.append(get_covar_results().parvals[1])
# min_BBNorm_l.append(0)
# max_BBNorm_l.append(0)
plot_data()
x_max=max(get_data_plot().x)+max(get_data_plot().x)*0.1
x_min = np.abs(min(get_data_plot().x)-min(get_data_plot().x)*0.1)
ymax = max(get_data_plot().y)+max(get_data_plot().y)*0.2
ymin = np.abs(min(get_data_plot().y)-min(get_data_plot().y)*0.05)
plot_fit_delchi(1,clearwindow=True, color='Black')
fig=plt.gcf()
ax1,ax2=fig.axes
ax1.set_title(source_name+' BID:'+bid+' MJD:'+str(mjdobs)+' SID:'+sid)
ax1.set_yscale('log')
ax1.set_xscale('log')
ax2.set_xscale('log')
ax2.set_xlabel('Energy [keV]', fontsize=14)
ax1.set_ylabel('Counts/sec/keV', fontsize=14)
ax2.set_ylabel('Sigma', fontsize=14)
ax1.set_xlim(x_min,x_max)
ax2.set_xlim(x_min,x_max)
plt.savefig(burst_folder+sid+'_b'+bid+'_'+fit_method+'_full_'+mine+'_'+maxe+'.pdf',orientation='landscape', papertype='a4')
plt.savefig(burst_folder+sid+'_b'+bid+'_'+fit_method+'_full_'+mine+'_'+maxe+'.png',orientation='landscape', papertype='a4')
plot_fit(1,clearwindow=True,xlog=True,ylog=True, color='Black')
plot_model_component("tb*dbb", replot=False, overplot=True, color='Green')
plot_model_component("tb*sbb", replot=False, overplot=True, color='Red')
plot_model_component("tb*bb", replot=False, overplot=True, color='Black')
plt.title(source_name+' BID:'+bid+' MJD:'+str(mjdobs)+' SID:'+sid)
plt.xlabel('Energy [keV]', fontsize=14)
plt.ylabel('Counts/sec/keV', fontsize=14)
plt.xlim(x_min,x_max)
plt.ylim(ymin,ymax)
plt.savefig(burst_folder+sid+'_b'+bid+'_'+fit_method+'_full_comp_'+mine+'_'+maxe+'.pdf',orientation='landscape', papertype='a4')
plt.savefig(burst_folder+sid+'_b'+bid+'_'+fit_method+'_full_comp_'+mine+'_'+maxe+'.png',orientation='landscape', papertype='a4')
items = {'Source_Name' : Source_Name_l, 'BID' : BID_l, 'SID': SID_l,'OBS_ID' : OBS_ID_l,'MJD-OBS' : MJD_OBS_l,'DATE-OBS' : DATE_OBS_l,'exp' :exp_l,'NH' : NH_l,
'BB_kT' : BB_kT_l,'min_BBkT' : min_BBkT_l,'max_BBkt' : max_BBkt_l,'BB_Norm' : BB_Norm_l,'min_BBNorm' : min_BBNorm_l,'max_BBNorm' : max_BBNorm_l,
'BB_flux' : BB_flux_l,'min_BBflux' : min_BBflux_l,
'max_BBflux' : max_BBflux_l,'Bol_BBF' : Bol_BBF_l,'min_BolBBF' : min_BolBBF_l,'max_BolBBF' : max_BolBBF_l,'2BB_kT' : BB2_kT_l,
'min_2BBkT' : min_2BBkT_l,'max_2BBkt' : max_2BBkt_l,'2BB_Norm' : BB2_Norm_l,
'min_2BBNorm' : min_2BBNorm_l,'max_2BBNorm' : max_2BBNorm_l,'2BB_flux' : BB2_flux_l,'min_2BBflux' : min_2BBflux_l,'max_2BBflux' : max_2BBflux_l,'Bol_2BBF' : Bol_2BBF_l,'min_2BolBBF' : min_2BolBBF_l,
'max_2BolBBF' : max_2BolBBF_l,'fa' : fa_l,'min_fa' : min_fa_l,'max_fa' : max_fa_l,'dbb_kT' : dbb_kT_l,'min_dbbkT' : min_dbbkT_l,'max_dbbkT' : max_dbbkT_l,'dbb_Norm' : dbb_Norm_l,'min_dbbNorm' : min_dbbNorm_l,
'max_dbbNorm' : max_dbbNorm_l,'dbb_flux' : dbb_flux_l,'min_dbbflux' : min_dbbflux_l,'max_dbbflux' : max_dbbflux_l,
'sBB_kT' : sBB_kT_l,'min_sBBkT' : min_sBBkT_l,'max_sBBkt' : max_sBBkt_l,'sBB_Norm' : sBB_Norm_l,'min_sBBNorm' : min_sBBNorm_l,
'max_sBBNorm' : max_sBBNorm_l,'sBB_flux' : sBB_flux_l,
'min_sBBflux' : min_sBBflux_l,'max_sBBflux' : max_sBBflux_l,
'RStat' : RStat_l,'dof' : dof_l}
#print(len(Source_Name_l))
print('Sourcename:',len(Source_Name_l))
print('BID:',len(BID_l))
print('SID:',len(SID_l))
print('OBSID:',len(OBS_ID_l))
print('MJD:',len(MJD_OBS_l))
print('exp:',len(exp_l))
print('NH:',len(NH_l))
print('BB_kT:',len(BB_kT_l))
print('min_BBkT:',len(min_BBkT_l))
print('max_BBkt:',len(max_BBkt_l))
print('BB_Norm:',len(BB_Norm_l))
print('min_BBNorm:',len(min_BBNorm_l))
print('max_BBNorm:',len(max_BBNorm_l))
print('BB_flux:',len(BB_flux_l))
print('min_BBflux:',len(min_BBflux_l))
print('max_BBflux:',len(max_BBflux_l))
print('Bol_BBF:',len(Bol_BBF_l))
print('min_BolBBF:',len(min_BolBBF_l))
print('max_BolBBF:',len(max_BolBBF_l))
print('BB2_kT:',len(BB2_kT_l))
print('min_2BBkT:',len(min_2BBkT_l))
print('max_2BBkt:',len(max_2BBkt_l))
print('BB2_Norm:',len(BB2_Norm_l))
print('min_2BBNorm:',len(min_2BBNorm_l))
print('max_2BBNorm:',len(max_2BBNorm_l))
print('BB2_flux:',len(BB2_flux_l))
print('min_2BBflux:',len(min_2BBflux_l))
print('max_2BBflux:',len(max_2BBflux_l))
print('Bol_2BBF:',len(Bol_2BBF_l))
print('min_2BolBBF:',len(min_2BolBBF_l))
print('max_2BolBBF:',len(max_2BolBBF_l))
print('fa:',len(fa_l))
print('min_fa:',len(min_fa_l))
print('max_fa:',len(max_fa_l))
print('dbb_kT:',len(dbb_kT_l))
print('min_dbbkT:',len(min_dbbkT_l))
print('max_dbbkT:',len(max_dbbkT_l))
print('dbb_Norm:',len(dbb_Norm_l))
print('min_dbbNorm:',len(min_dbbNorm_l))
print('max_dbbNorm:',len(max_dbbNorm_l))
print('dbb_flux:',len(dbb_flux_l))
print('min_dbbflux:',len(min_dbbflux_l))
print('max_dbbflux:',len(max_dbbflux_l))
print('sBB_kT:',len(sBB_kT_l))
print('min_sBBkT:',len(min_sBBkT_l))
print('max_sBBkt:',len(max_sBBkt_l))
print('sBB_Norm:',len(sBB_Norm_l))
print('min_sBBNorm:',len(min_sBBNorm_l))
print('max_sBBNorm:',len(max_sBBNorm_l))
print('sBB_flux:',len(sBB_flux_l))
print('min_sBBflux:',len(min_sBBflux_l))
print('max_sBBflux:',len(max_sBBflux_l))
print('Rstat:',len(RStat_l))
print('Dof:',len(dof_l))
#for i in check_list:
#print(i,str(len(i)))
df = pd.DataFrame.from_dict(items)
import sys
import pandas as pd
not_included_gene_file = './not_included_genes.txt'
gene_file = '../../ecoli_refgene/ecoli_refgene.txt'
raw_file = './raw_data.txt'
# load data
with open(not_included_gene_file) as f:
not_included_genes = f.readlines()
not_included_genes = [row.strip() for row in not_included_genes]
pd_refgenes = pd.read_csv(gene_file, sep='\t')
genes = []
for name, name2 in zip(pd_refgenes['name'], pd_refgenes['name2']):
if name not in not_included_genes or name2 not in not_included_genes:
genes.append(name2)
antibiotics = []
knowledge = []
triples = []
first = True
pd_raw_data = pd.read_csv(raw_file, sep='\t')
#------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
# Standard
import numpy as np
import pandas as pd
import cvxpy as cp
from sklearn.preprocessing import PolynomialFeatures
from statsmodels.tools.tools import add_constant
from scipy.stats import norm
# User
from .exceptions import WrongInputException
###############################################################################
# Main
###############################################################################
#------------------------------------------------------------------------------
# Tools
#------------------------------------------------------------------------------
def get_colnames(x,prefix="X"):
try:
dim = x.shape[1]
colnames = [prefix+str(j) for j in np.arange(start=1,stop=dim+1)]
except IndexError:
colnames = [prefix]
return colnames
def convert_to_dict_series(Yobs=None,Ytrue=None,Y0=None,Y1=None,W=None):
# Save local arguments
args = locals()
# Convert values to series with appropriate names
args = {k: pd.Series(v, name=k) for k,v in args.items() if v is not None}
return args
def convert_to_dict_df(X=None):
# Save local arguments
args = locals()
# Convert values to series with appropriate names
args = {k: pd.DataFrame(v, columns=get_colnames(x=v,prefix=k)) for k,v in args.items() if v is not None}
return args
def convert_normal_to_uniform(x, mu="infer", sigma="infer", lower_bound=0, upper_bound=1, n_digits_round=2):
""" See link: https://math.stackexchange.com/questions/2343952/how-to-transform-gaussiannormal-distribution-to-uniform-distribution
"""
# Convert to np and break link
x = np.array(x.copy())
if mu=="infer":
mu = np.mean(x, axis=0).round(n_digits_round)
if sigma=="infer":
sigma = np.sqrt(np.var(x, axis=0)).round(n_digits_round)
# Get CDF
x_cdf = norm.cdf(x=x, loc=mu, scale=sigma)
# Transform
x_uni = lower_bound + (upper_bound - lower_bound) * x_cdf
return x_uni
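# Illustrative check (not part of the module's API): for roughly normal input the
# transform above should give values approximately uniform on [lower_bound, upper_bound]:
# z = np.random.randn(10_000)
# u = convert_normal_to_uniform(z, mu=0, sigma=1, lower_bound=0, upper_bound=1)
# u.min(), u.max() # close to (0, 1), with a roughly flat histogram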
#------------------------------------------------------------------------------
# Generate X data
#------------------------------------------------------------------------------
def generate_ar_process(T=100, x_p=5, ar_p=3, burnin=50, **kwargs):
    # Extract/generate initial coefficients of X
mu = kwargs.get('mu', 0)
sigma = kwargs.get('sigma', 1)
## Extract/generate parameters for AR
const = kwargs.get('const', 0)
ar_coefs = kwargs.get('ar_coefs', np.linspace(start=0.5, stop=0, num=ar_p, endpoint=False))
error_coef = kwargs.get('error_coef', 1)
# Fix AR coefs; flip order and reshape to comformable shape
ar_coefs = np.flip(ar_coefs).reshape(-1,1)
# Generate errors
errors = kwargs.get('errors', np.random.multivariate_normal(mean=np.ones(x_p),
cov=np.identity(x_p),
size=T))
# Generate errors for burn-in period
errors_burnin = np.random.multivariate_normal(mean=np.mean(errors,axis=0),
cov=np.cov(errors.T),
size=burnin)
errors_all = np.concatenate((errors_burnin,errors))
# Generate initial value(s)
X = mu + sigma * np.random.randn(ar_p,x_p)
# Simulate AR(p) with burn-in included
for b in range(burnin+T):
X = np.concatenate((X,
const + ar_coefs.T @ X[0:ar_p,:] + error_coef * errors_all[b,0:x_p]),
axis=0)
# Return only the last T observations (we have removed the dependency on the initial draws)
return X[-T:,]
def generate_iid_process(T=100, x_p=5, distribution="normal", **kwargs):
# Extract for normal
mu = kwargs.get('mu', 0)
sigma = kwargs.get('sigma', 1)
covariance = kwargs.get('covariance', 0)
# Extract for uniform
lower_bound = kwargs.get('lower_bound', 0)
upper_bound = kwargs.get('upper_bound', 1)
# Construct variance-covariance matrix
cov_diag = np.diag(np.repeat(a=sigma**2, repeats=x_p))
cov_off_diag= np.ones(shape=(x_p,x_p)) * covariance
np.fill_diagonal(a=cov_off_diag, val=0)
cov_mat = cov_diag + cov_off_diag
# Generate X
if distribution=="normal":
# Draw from normal distribution
X = np.random.multivariate_normal(mean=np.repeat(a=mu, repeats=x_p),
cov=cov_mat,
size=T)
elif distribution=="uniform":
# Draw from uniform distribution
X = np.random.uniform(low=lower_bound,
high=upper_bound,
size=(T,x_p))
else:
raise WrongInputException(input_name="distribution",
provided_input=distribution,
allowed_inputs=["normal", "uniform"])
return X
def generate_errors(N=1000, p=5, mu=0, sigma=1, cov_X=0.25, cov_y=0.5):
# Number of dimensions including y
n_dim = p+1
## Construct variance-covariance matrix
# Construct diagonal with variance = sigma^2
cov_diag = np.diag(np.repeat(a=sigma**2, repeats=n_dim))
## Construct off-diagonal with covariances
# Fill out for X (and y)
cov_off_diag = np.ones(shape=(n_dim,n_dim)) * cov_X
# Update y entries
cov_off_diag[p,:] = cov_off_diag[:,p] = cov_y
# Set diagonal to zero
np.fill_diagonal(a=cov_off_diag, val=0)
# Update final variance-covariance matrix
cov_mat = cov_diag + cov_off_diag
# Generate epsilon
eps = np.random.multivariate_normal(mean=np.repeat(a=mu, repeats=n_dim),
cov=cov_mat,
size=N)
return eps
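# Worked example of the covariance structure built above (assumed inputs p=2,
# sigma=1, cov_X=0.25, cov_y=0.5): the joint covariance of (X1_error, X2_error, y_error) is
#
#   [[1.00, 0.25, 0.50],
#    [0.25, 1.00, 0.50],
#    [0.50, 0.50, 1.00]]
#
# i.e. regressor errors share covariance cov_X, while each co-varies with the
# y error (the last column, used as the additive noise on Y) through cov_y.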
#------------------------------------------------------------------------------
# Generate f_star = E[Y|X=x]
#------------------------------------------------------------------------------
def _solve_meta_problem(A,B,w):
"""
Solve diag(X @ A') = B @ w for X such that X_ij>=0 and sum_j(X_ij)==1 for all i
"""
# Vectorize weights
w = _vectorize_beta(beta=w,x=B)
# Set up variable to solve for
X = cp.Variable(shape=(A.shape))
# Set up constraints
constraints = [X >= 0,
X @ np.ones(shape=(A.shape[1],)) == 1
]
# Set up objective function
objective = cp.Minimize(cp.sum_squares(cp.diag(X @ A.T) - B @ w))
# Instantiate
problem = cp.Problem(objective=objective, constraints=constraints)
# Solve (No need to specify solver because by default CVXPY calls the solver most specialized to the problem type)
problem.solve(verbose=False)
return X.value
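# Note on the helper above: each row of X is constrained to be a set of convex
# weights (non-negative, summing to one), so each diagonal entry diag(X @ A')[i]
# is a convex combination of the entries in row i of A, matched in least squares
# to the corresponding target entry (B @ w)[i].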
def _vectorize_beta(beta,x):
"""
Turn supplied beta into an appropriate shape
"""
if isinstance(beta, (int, float, np.integer)):
beta = np.repeat(a=beta, repeats=x.shape[1])
elif isinstance(beta, np.ndarray):
if len(beta)<x.shape[1]:
beta = np.tile(A=beta, reps=int(np.ceil(x.shape[1]/len(beta))))
# Shorten potentially
beta = beta[:x.shape[1]]
elif isinstance(beta, str):
if beta=="uniform":
beta = np.repeat(a=1/x.shape[1], repeats=x.shape[1])
elif beta=="flip_uniform":
beta = np.repeat(a=1/x.shape[1], repeats=x.shape[1])
else:
raise WrongInputException(input_name="beta",
provided_input=beta,
allowed_inputs=[int, float, str, np.ndarray, np.integer])
# Make sure beta has the right dimensions
beta = beta.reshape(-1,)
if x.shape[1]!=beta.shape[0]:
raise Exception(f"Beta is {beta.shape}-dim vector, but X is {x.shape}-dim matrix")
return beta
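# Illustrative sketch of the broadcasting rules above (x assumed to have 4 columns):
#
#   _vectorize_beta(beta=2,                x=x)  # -> array([2, 2, 2, 2])
#   _vectorize_beta(beta=np.array([1, 2]), x=x)  # -> array([1, 2, 1, 2]), tiled then truncated
#   _vectorize_beta(beta="uniform",        x=x)  # -> array([0.25, 0.25, 0.25, 0.25])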
def generate_linear_data(x,
beta=1,
beta_handling="default",
include_intercept=False,
expand=False,
degree=2,
interaction_only=False,
enforce_limits=False,
tol_fstar=100,
**kwargs):
#
BETA_HANDLING_ALLOWED = ["default", "structural", "split_order"]
# Convert to np and break link
x = np.array(x.copy())
    # Extreme points (row-wise min/max) of X, used to clip f_star at the end
if enforce_limits:
x_min, x_max = np.min(x, axis=1), np.max(x, axis=1)
# Series expansion of X
if expand:
if degree<2:
raise Exception(f"When polynomial features are generated (expand=True), 'degree' must be >=2. It is curently {degree}")
# Instantiate
polynomialfeatures = PolynomialFeatures(degree=degree, interaction_only=interaction_only, include_bias=False, order='C')
# Expand x
x_poly = polynomialfeatures.fit_transform(x)[:,x.shape[1]:]
# Concatenate
x_all = np.concatenate((x,x_poly), axis=1)
else:
x_all = x
# Include a constant in X
if include_intercept:
x = add_constant(data=x, prepend=True, has_constant='skip')
    # Different ways of generating beta and fstar
if beta_handling=="default":
# Make beta a conformable vector
beta = _vectorize_beta(beta=beta,x=x_all)
# Generate fstar=E[y|X=x]
f_star = x_all @ beta
elif beta_handling=="structural":
# Get tricky weight matrix, solving diag(WX')=X_all*beta_uniform
weights = _solve_meta_problem(A=x, B=x_all, w="uniform")
# Generate fstar=E[y|X=x]
f_star = np.diagonal(weights @ x.T)
# Fact check this
f_star_check = x_all @ _vectorize_beta(beta="uniform",x=x_all)
if np.sum(f_star-f_star_check) > tol_fstar:
raise Exception("Trickiness didn't work as differences are above tolerance")
elif beta_handling=="split_order":
if isinstance(beta, (int, float, str, np.integer)):
raise Exception("Whenever 'beta_handling'='split_order', then 'beta' cannot be either (int, float, str)")
elif len(beta)!=degree:
raise Exception(f"beta is if length {len(beta)}, but MUST be of length {degree}")
if not expand:
raise Exception("Whenever 'beta_handling'='split_order', then 'expand' must be True")
# First-order beta
beta_first_order = _vectorize_beta(beta=beta[0],x=x)
# Higher-order beta
beta_higher_order = np.empty(shape=(0,))
# Initialize
higher_order_col = 0
for higher_order in range(2,degree+1):
# Instantiate
poly_temp = PolynomialFeatures(degree=higher_order, interaction_only=interaction_only, include_bias=False, order='C')
# Expand x
x_poly_temp = poly_temp.fit_transform(x)[:,x.shape[1]+higher_order_col:]
# Generate temporary betas for this degree of the expansion
beta_higher_order_temp = _vectorize_beta(beta=beta[higher_order-1],x=x_poly_temp)
# Append betas
beta_higher_order = np.append(arr=beta_higher_order, values=beta_higher_order_temp)
# Add column counter that governs which columns to match in X
higher_order_col += x_poly_temp.shape[1]
# Generate fstar=E[y|X=x]
f_star = x @ beta_first_order + x_poly @ beta_higher_order
else:
raise WrongInputException(input_name="beta_handling",
provided_input=beta_handling,
allowed_inputs=BETA_HANDLING_ALLOWED)
# Reshape for conformity
f_star = f_star.reshape(-1,)
if enforce_limits:
f_star = np.where(f_star<x_min, x_min, f_star)
f_star = np.where(f_star>x_max, x_max, f_star)
return f_star
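# Illustrative usage sketch (parameter choices are assumptions): iid uniform
# regressors combined into a first-order linear index with uniform weights.
#
#   X_demo = generate_iid_process(T=500, x_p=5, distribution="uniform")
#   f_demo = generate_linear_data(X_demo, beta="uniform",
#                                 beta_handling="default", expand=False)
#   # here f_demo[i] equals X_demo[i, :].mean(), since each weight is 1/x_p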
def generate_friedman_data_1(x, **kwargs):
# Convert to np and break link
x = np.array(x.copy())
# Sanity check
if x.shape[1]<5:
raise Exception(f"Friedman 1 requires at least 5 regresors, but only {x.shape[1]} are provided in x")
# Generate fstar=E[y|X=x]
f_star = 0.1*np.exp(4*x[:,0]) + 4/(1+np.exp(-20*(x[:,1]-0.5))) + 3*x[:,2] + 2*x[:,3] + 1*x[:,4]
# Reshape for conformity
f_star = f_star.reshape(-1,)
return f_star
def generate_friedman_data_2(x, **kwargs):
# Convert to np and break link
x = np.array(x.copy())
# Sanity check
if x.shape[1]<5:
raise Exception(f"Friedman 2 requires at least 5 regresors, but only {x.shape[1]} are provided in x")
# Generate fstar=E[y|X=x]
f_star = 10*np.sin(np.pi*x[:,0]*x[:,1]) + 20*(x[:,2]-0.5)**2 + 10*x[:,3] + 5*x[:,4]
# Reshape for conformity
f_star = f_star.reshape(-1,)
return f_star
#------------------------------------------------------------------------------
# Simulate data
#------------------------------------------------------------------------------
def simulate_data(f,
T0=500,
T1=50,
X_type="AR",
X_dist="normal",
X_dim=5,
AR_lags=3,
ate=1,
eps_mean=0,
eps_std=1,
eps_cov_x=0,
eps_cov_y=0,
**kwargs):
# Total number of time periods
T = T0 + T1
# Generate errors
errors = generate_errors(N=T, p=X_dim, mu=eps_mean, sigma=eps_std, cov_X=eps_cov_x, cov_y=eps_cov_y)
# Generate covariates
if X_type=="AR":
X = generate_ar_process(T=T,
x_p=X_dim,
ar_p=AR_lags,
errors=errors)
elif X_type=="iid":
X = generate_iid_process(T=T,x_p=X_dim,distribution=X_dist, **kwargs)
# Generate W
W = np.repeat((0,1), (T0,T1))
# Generate Y
Y = f(x=X, **kwargs) + ate*W + errors[:,-1]
# Collect data
df = pd.concat(objs=[pd.Series(data=Y,name="Y"),
                         pd.Series(data=W,name="W"),
import numpy as np
import pandas as pd
import yfinance as yf
from yahoo_earnings_calendar import YahooEarningsCalendar
import mktanalytics as ma
from tqdm.notebook import tqdm
def nearest(items, pivot):
return min(items, key=lambda x: abs(x - pivot))
def get_atm_vol(undl_list, weeks=1, calc_strangle=False, target_price=1):
atm_dict = {}
date_dict = {}
volume_dict = {}
no_options_list = []
if calc_strangle:
target_put = {}
target_call = {}
for u in tqdm(undl_list):
try:
ticker = yf.Ticker(u)
option_expiries = list(ticker.options)
            option_expiries = [pd.Timestamp(x) for x in option_expiries]
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import decomposition
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
def run_train(fold):
df = pd.read_csv('../input/train_folds.csv')
df.review = df.review.apply(str)
df_train = df[df.kfold != fold].reset_index(drop=True)
df_valid = df[df.kfold == fold].reset_index(drop=True)
tfv = TfidfVectorizer()
tfv.fit(df_train.review.values)
X_train = tfv.transform(df_train.review.values)
X_valid = tfv.transform(df_valid.review.values)
svd = decomposition.TruncatedSVD(n_components=120, random_state = 42)
svd.fit(X_train)
X_train_svd = svd.transform(X_train)
X_valid_svd = svd.transform(X_valid)
y_train = df_train.sentiment.values
y_valid = df_valid.sentiment.values
clf = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=42)
clf.fit(X_train_svd, y_train)
pred = clf.predict_proba(X_valid_svd)[:, 1]
auc = roc_auc_score(y_valid, pred)
print(f'Fold= {fold}, AUC = {auc}')
df_valid.loc[:, 'rf_svd_pred'] = pred
return df_valid[['id', 'sentiment', 'kfold', 'rf_svd_pred']]
if __name__ == "__main__":
dfs = []
for j in range(5):
temp_df = run_train(j)
dfs.append(temp_df)
    fin_valid_df = pd.concat(dfs)
import argparse
import numpy as np
import pandas as pd
from scipy import stats
EXPRESSION_MATRIX_METADATA = ['Genotype', 'Genotype_Group', 'Replicate', 'Condition', 'tenXBarcode']
RANDOM_SEED = 42
def main():
ap = argparse.ArgumentParser(description="Create a synthetic UMI count table")
ap.add_argument("-d", "--dist_file", dest="file", help="Expression data table", metavar="FILE", default=None)
ap.add_argument("-s", "--ss_file", dest="ssfile", help="Single-Cell Expression data table", metavar="FILE",
required=True)
ap.add_argument("-o", "--out", dest="out", help="Output count table", metavar="FILE", required=True)
ap.add_argument("--log", dest="log", help="Data is log-transformed", action='store_const', const=True,
default=False)
ap.add_argument("--shuffle", dest="shuffle", help="Don't simulate; just reshuffle", action='store_const',
const=True, default=False)
args = ap.parse_args()
synthesize_data(args.file, args.ssfile, args.out, dist_is_log=args.log, reshuffle_data=args.shuffle)
def synthesize_data(distribution_file_name, single_cell_file_name, output_file_name, dist_is_log=False,
reshuffle_data=False):
np.random.seed(RANDOM_SEED)
print("Reading single-cell data")
    ss_df = pd.read_csv(single_cell_file_name, sep="\t", header=0, index_col=0)
import numpy as np
from scipy.io import loadmat
import os
from pathlib import Path
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
# plotting parameters
sns.set(font_scale=1.1)
sns.set_context("talk")
sns.set_palette(['#701f57', '#ad1759', '#e13342', '#f37651'])
transparent = False
markers = ['o','^','s']
# Plot runtime as we increase the number of sensors
path = Path(__file__).parent / os.path.join('..','matlab','data') # path to the saved results from matlab
outpath = os.path.join(Path(__file__).parent,'figures')
if not os.path.exists(outpath):
os.makedirs(outpath)
Ms = range(7,13)
res_data = pd.DataFrame()
######### imports #########
from ast import arg
from datetime import timedelta
import sys
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_constants import *
from Reff_functions import *
import glob
import os
from sys import argv
import arviz as az
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
from math import ceil
import pickle
from cmdstanpy import CmdStanModel
matplotlib.use("Agg")
from params import (
truncation_days,
start_date,
third_start_date,
alpha_start_date,
omicron_start_date,
omicron_only_date,
omicron_dominance_date,
pop_sizes,
num_forecast_days,
get_all_p_detect_old,
get_all_p_detect,
)
def process_vax_data_array(
data_date,
third_states,
third_end_date,
variant="Delta",
print_latest_date_in_ts=False,
):
"""
Processes the vaccination data to an array for either the Omicron or Delta strain.
"""
# Load in vaccination data by state and date
vaccination_by_state = pd.read_csv(
"data/vaccine_effect_timeseries_" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly
# different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state.loc[
vaccination_by_state["variant"] == variant
]
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
if print_latest_date_in_ts:
# display the latest available date in the NSW data (will be the same date between states)
print(
"Latest date in vaccine data is {}".format(
vaccination_by_state[vaccination_by_state.state == "NSW"].date.values[-1]
)
)
# Get only the dates we need + 1 (this serves as the initial value)
vaccination_by_state = vaccination_by_state[
(
vaccination_by_state.date
>= pd.to_datetime(third_start_date) - timedelta(days=1)
)
& (vaccination_by_state.date <= third_end_date)
]
vaccination_by_state = vaccination_by_state[
vaccination_by_state["state"].isin(third_states)
] # Isolate fitting states
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# If we are missing recent vaccination data, fill it in with the most recent available data.
latest_vacc_data = vaccination_by_state.columns[-1]
if latest_vacc_data < pd.to_datetime(third_end_date):
vaccination_by_state = pd.concat(
[vaccination_by_state]
+ [
pd.Series(vaccination_by_state[latest_vacc_data], name=day)
for day in pd.date_range(start=latest_vacc_data, end=third_end_date)
],
axis=1,
)
# Convert to simple array only useful to pass to stan (index 1 onwards)
vaccination_by_state_array = vaccination_by_state.iloc[:, 1:].to_numpy()
return vaccination_by_state_array
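# Illustrative usage sketch (dates and states below are placeholders): build the
# Delta and Omicron vaccine-effect arrays that feed into the Stan data block.
#
#   ve_delta = process_vax_data_array(
#       data_date=pd.to_datetime("2022-01-05"),
#       third_states=["NSW", "VIC"],
#       third_end_date=pd.to_datetime("2022-01-01"),
#       variant="Delta",
#   )
#   ve_omicron = process_vax_data_array(
#       data_date=pd.to_datetime("2022-01-05"),
#       third_states=["NSW", "VIC"],
#       third_end_date=pd.to_datetime("2022-01-01"),
#       variant="Omicron",
#   )
#   # each array has one row per fitted state and one column per day of the third wave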
def get_data_for_posterior(data_date):
"""
Read in the various datastreams and combine the samples into a dictionary that we then
dump to a pickle file.
"""
print("Performing inference on state level Reff")
data_date = pd.to_datetime(data_date) # Define data date
print("Data date is {}".format(data_date.strftime("%d%b%Y")))
fit_date = pd.to_datetime(data_date - timedelta(days=truncation_days))
print("Last date in fitting {}".format(fit_date.strftime("%d%b%Y")))
# * Note: 2020-09-09 won't work (for some reason)
# read in microdistancing survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# If you get an error here saying 'cannot create a new series when the index is not unique',
# then you have a duplicated md file.
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always = always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
always = always.fillna(method="ffill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
survey_counts_base = (
pd.pivot_table(data=always, index="date", columns="state", values="count")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
survey_respond_base = (
pd.pivot_table(data=always, index="date", columns="state", values="respondents")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
# read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest Mask wearing survey is {}".format(mask_wearing.date.values[-1]))
mask_wearing["state"] = (
mask_wearing["state"].map(states_initials).fillna(mask_wearing["state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
mask_wearing_always = mask_wearing_always.fillna(method="ffill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_counts_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="count"
).astype(int)
mask_wearing_respond_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="respondents"
).astype(int)
df_Reff = pd.read_csv(
"results/EpyReff/Reff_delta" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff["date"] = df_Reff.INFECTION_DATES
df_Reff["state"] = df_Reff.STATE
df_Reff_omicron = pd.read_csv(
"results/EpyReff/Reff_omicron" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff_omicron["date"] = df_Reff_omicron.INFECTION_DATES
df_Reff_omicron["state"] = df_Reff_omicron.STATE
# relabel some of the columns to avoid replication in the merged dataframe
col_names_replace = {
"mean": "mean_omicron",
"lower": "lower_omicron",
"upper": "upper_omicron",
"top": "top_omicron",
"bottom": "bottom_omicron",
"std": "std_omicron",
}
df_Reff_omicron.rename(col_names_replace, axis=1, inplace=True)
# read in NNDSS/linelist data
# If this errors it may be missing a leading zero on the date.
df_state = read_in_cases(
case_file_date=data_date.strftime("%d%b%Y"),
apply_delay_at_read=True,
apply_inc_at_read=True,
)
# save the case file for convenience
df_state.to_csv("results/cases_" + data_date.strftime("%Y-%m-%d") + ".csv")
df_Reff = df_Reff.merge(
df_state,
how="left",
left_on=["state", "date"],
right_on=["STATE", "date_inferred"],
) # how = left to use Reff days, NNDSS missing dates
# merge in the omicron stuff
df_Reff = df_Reff.merge(
df_Reff_omicron,
how="left",
left_on=["state", "date"],
right_on=["state", "date"],
)
df_Reff["rho_moving"] = df_Reff.groupby(["state"])["rho"].transform(
lambda x: x.rolling(7, 1).mean()
    )  # 7-day rolling mean with min_periods=1
# some days have no cases, so need to fillna
df_Reff["rho_moving"] = df_Reff.rho_moving.fillna(method="bfill")
# counts are already aligned with infection date by subtracting a random incubation period
df_Reff["local"] = df_Reff.local.fillna(0)
df_Reff["imported"] = df_Reff.imported.fillna(0)
######### Read in Google mobility results #########
sys.path.insert(0, "../")
df_google = read_in_google(moving=True, moving_window=7)
# df_google = read_in_google(moving=False)
df = df_google.merge(df_Reff[[
"date",
"state",
"mean",
"lower",
"upper",
"top",
"bottom",
"std",
"mean_omicron",
"lower_omicron",
"upper_omicron",
"top_omicron",
"bottom_omicron",
"std_omicron",
"rho",
"rho_moving",
"local",
"imported",
]],
on=["date", "state"],
how="inner",
)
######### Create useable dataset #########
    # ACT and NT are not in the original estimates and need to be extrapolated;
    # sorting keeps the state order consistent with the sort in data_by_state
# Note that as we now consider the third wave for ACT, we include it in the third
# wave fitting only!
states_to_fit_all_waves = sorted(
["NSW", "VIC", "QLD", "SA", "WA", "TAS", "ACT", "NT"]
)
first_states = sorted(["NSW", "VIC", "QLD", "SA", "WA", "TAS"])
fit_post_March = True
ban = "2020-03-20"
first_end_date = "2020-03-31"
# data for the first wave
first_date_range = {
"NSW": pd.date_range(start="2020-03-01", end=first_end_date).values,
"QLD": pd.date_range(start="2020-03-01", end=first_end_date).values,
"SA": pd.date_range(start="2020-03-01", end=first_end_date).values,
"TAS": pd.date_range(start="2020-03-01", end=first_end_date).values,
"VIC": pd.date_range(start="2020-03-01", end=first_end_date).values,
"WA": pd.date_range(start="2020-03-01", end=first_end_date).values,
}
# Second wave inputs
sec_states = sorted([
"NSW",
# "VIC",
])
sec_start_date = "2020-06-01"
sec_end_date = "2021-01-19"
# choose dates for each state for sec wave
sec_date_range = {
"NSW": pd.date_range(start="2020-06-01", end="2021-01-19").values,
# "VIC": pd.date_range(start="2020-06-01", end="2020-10-28").values,
}
# Third wave inputs
third_states = sorted([
"NSW",
"VIC",
"ACT",
"QLD",
"SA",
"TAS",
# "NT",
"WA",
])
# Subtract the truncation days to avoid right truncation as we consider infection dates
# and not symptom onset dates
third_end_date = data_date - pd.Timedelta(days=truncation_days)
# choose dates for each state for third wave
# Note that as we now consider the third wave for ACT, we include it in
# the third wave fitting only!
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
# "NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
fit_mask = df.state.isin(first_states)
if fit_post_March:
fit_mask = (fit_mask) & (df.date >= start_date)
fit_mask = (fit_mask) & (df.date <= first_end_date)
second_wave_mask = df.state.isin(sec_states)
second_wave_mask = (second_wave_mask) & (df.date >= sec_start_date)
second_wave_mask = (second_wave_mask) & (df.date <= sec_end_date)
# Add third wave stuff here
third_wave_mask = df.state.isin(third_states)
third_wave_mask = (third_wave_mask) & (df.date >= third_start_date)
third_wave_mask = (third_wave_mask) & (df.date <= third_end_date)
predictors = mov_values.copy()
# predictors.extend(['driving_7days','transit_7days','walking_7days','pc'])
# remove residential to see if it improves fit
# predictors.remove("residential_7days")
df["post_policy"] = (df.date >= ban).astype(int)
dfX = df.loc[fit_mask].sort_values("date")
df2X = df.loc[second_wave_mask].sort_values("date")
df3X = df.loc[third_wave_mask].sort_values("date")
dfX["is_first_wave"] = 0
for state in first_states:
dfX.loc[dfX.state == state, "is_first_wave"] = (
dfX.loc[dfX.state == state]
.date.isin(first_date_range[state])
.astype(int)
.values
)
df2X["is_sec_wave"] = 0
for state in sec_states:
df2X.loc[df2X.state == state, "is_sec_wave"] = (
df2X.loc[df2X.state == state]
.date.isin(sec_date_range[state])
.astype(int)
.values
)
# used to index what dates are featured in omicron AND third wave
omicron_date_range = pd.date_range(start=omicron_start_date, end=third_end_date)
df3X["is_third_wave"] = 0
for state in third_states:
df3X.loc[df3X.state == state, "is_third_wave"] = (
df3X.loc[df3X.state == state]
.date.isin(third_date_range[state])
.astype(int)
.values
)
# condition on being in third wave AND omicron
df3X.loc[df3X.state == state, "is_omicron_wave"] = (
(
df3X.loc[df3X.state == state].date.isin(omicron_date_range)
* df3X.loc[df3X.state == state].date.isin(third_date_range[state])
)
.astype(int)
.values
)
data_by_state = {}
sec_data_by_state = {}
third_data_by_state = {}
for value in ["mean", "std", "local", "imported"]:
data_by_state[value] = pd.pivot(
dfX[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
        # account for dates pre second wave
if df2X.loc[df2X.state == sec_states[0]].shape[0] == 0:
print("making empty")
sec_data_by_state[value] = pd.DataFrame(columns=sec_states).astype(float)
else:
sec_data_by_state[value] = pd.pivot(
df2X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
        # account for dates pre third wave
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# now add in the summary stats for Omicron Reff
for value in ["mean_omicron", "std_omicron"]:
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# FIRST PHASE
mobility_by_state = []
mobility_std_by_state = []
count_by_state = []
respond_by_state = []
mask_wearing_count_by_state = []
mask_wearing_respond_by_state = []
include_in_first_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: dfX.date.values[-1]]
survey_counts = survey_counts_base.loc[: dfX.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: dfX.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: dfX.date.values[-1]]
for state in first_states:
mobility_by_state.append(dfX.loc[dfX.state == state, predictors].values / 100)
mobility_std_by_state.append(
dfX.loc[dfX.state == state, [val + "_std" for val in predictors]].values / 100
)
count_by_state.append(survey_counts.loc[start_date:first_end_date, state].values)
respond_by_state.append(survey_respond.loc[start_date:first_end_date, state].values)
mask_wearing_count_by_state.append(
mask_wearing_counts.loc[start_date:first_end_date, state].values
)
mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[start_date:first_end_date, state].values
)
include_in_first_wave.append(
dfX.loc[dfX.state == state, "is_first_wave"].values
)
# SECOND PHASE
sec_mobility_by_state = []
sec_mobility_std_by_state = []
sec_count_by_state = []
sec_respond_by_state = []
sec_mask_wearing_count_by_state = []
sec_mask_wearing_respond_by_state = []
include_in_sec_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: df2X.date.values[-1]]
survey_counts = survey_counts_base.loc[: df2X.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: df2X.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: df2X.date.values[-1]]
for state in sec_states:
sec_mobility_by_state.append(
df2X.loc[df2X.state == state, predictors].values / 100
)
sec_mobility_std_by_state.append(
df2X.loc[df2X.state == state, [val + "_std" for val in predictors]].values / 100
)
sec_count_by_state.append(
survey_counts.loc[sec_start_date:sec_end_date, state].values
)
sec_respond_by_state.append(
survey_respond.loc[sec_start_date:sec_end_date, state].values
)
sec_mask_wearing_count_by_state.append(
mask_wearing_counts.loc[sec_start_date:sec_end_date, state].values
)
sec_mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[sec_start_date:sec_end_date, state].values
)
include_in_sec_wave.append(df2X.loc[df2X.state == state, "is_sec_wave"].values)
# THIRD WAVE
third_mobility_by_state = []
third_mobility_std_by_state = []
third_count_by_state = []
third_respond_by_state = []
third_mask_wearing_count_by_state = []
third_mask_wearing_respond_by_state = []
include_in_third_wave = []
include_in_omicron_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: df3X.date.values[-1]]
survey_counts = survey_counts_base.loc[: df3X.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: df3X.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: df3X.date.values[-1]]
for state in third_states:
third_mobility_by_state.append(
df3X.loc[df3X.state == state, predictors].values / 100
)
third_mobility_std_by_state.append(
df3X.loc[df3X.state == state, [val + "_std" for val in predictors]].values / 100
)
third_count_by_state.append(
survey_counts.loc[third_start_date:third_end_date, state].values
)
third_respond_by_state.append(
survey_respond.loc[third_start_date:third_end_date, state].values
)
third_mask_wearing_count_by_state.append(
mask_wearing_counts.loc[third_start_date:third_end_date, state].values
)
third_mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[third_start_date:third_end_date, state].values
)
include_in_third_wave.append(
df3X.loc[df3X.state == state, "is_third_wave"].values
)
include_in_omicron_wave.append(
df3X.loc[df3X.state == state, "is_omicron_wave"].values
)
# policy boolean flag for after travel ban in each wave
policy = dfX.loc[
dfX.state == first_states[0], "post_policy"
] # this is the post ban policy
policy_sec_wave = [1] * df2X.loc[df2X.state == sec_states[0]].shape[0]
policy_third_wave = [1] * df3X.loc[df3X.state == third_states[0]].shape[0]
# read in the vaccination data
delta_vaccination_by_state_array = process_vax_data_array(
data_date=data_date,
third_states=third_states,
third_end_date=third_end_date,
variant="Delta",
print_latest_date_in_ts=True,
)
omicron_vaccination_by_state_array = process_vax_data_array(
data_date=data_date,
third_states=third_states,
third_end_date=third_end_date,
variant="Omicron",
)
# Make state by state arrays
state_index = {state: i + 1 for i, state in enumerate(states_to_fit_all_waves)}
# dates to apply alpha in the second wave (this won't allow for VIC to be added as
# the date_ranges are different)
apply_alpha_sec_wave = (
sec_date_range["NSW"] >= pd.to_datetime(alpha_start_date)
).astype(int)
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(third_start_date)
).days
omicron_only_day = (
pd.to_datetime(omicron_only_date) - pd.to_datetime(third_start_date)
).days
heterogeneity_start_day = (
pd.to_datetime("2021-08-20") - pd.to_datetime(third_start_date)
).days
# number of days we fit the average VE over
tau_vax_block_size = 3
# get pop size array
pop_size_array = []
for s in states_to_fit_all_waves:
pop_size_array.append(pop_sizes[s])
p_detect = get_all_p_detect_old(
states=third_states,
end_date=third_end_date,
num_days=df3X.loc[df3X.state == "NSW"].shape[0],
)
df_p_detect = pd.DataFrame(p_detect, columns=third_states)
df_p_detect["date"] = third_date_range["NSW"]
df_p_detect.to_csv("results/CA_" + data_date.strftime("%Y-%m-%d") + ".csv")
# p_detect = get_all_p_detect(
# end_date=third_end_date,
# num_days=df3X.loc[df3X.state == "NSW"].shape[0],
# )
# input data block for stan model
input_data = {
"j_total": len(states_to_fit_all_waves),
"N_first": dfX.loc[dfX.state == first_states[0]].shape[0],
"K": len(predictors),
"j_first": len(first_states),
"Reff": data_by_state["mean"].values,
"mob": mobility_by_state,
"mob_std": mobility_std_by_state,
"sigma2": data_by_state["std"].values ** 2,
"policy": policy.values,
"local": data_by_state["local"].values,
"imported": data_by_state["imported"].values,
"N_sec": df2X.loc[df2X.state == sec_states[0]].shape[0],
"j_sec": len(sec_states),
"Reff_sec": sec_data_by_state["mean"].values,
"mob_sec": sec_mobility_by_state,
"mob_sec_std": sec_mobility_std_by_state,
"sigma2_sec": sec_data_by_state["std"].values ** 2,
"policy_sec": policy_sec_wave,
"local_sec": sec_data_by_state["local"].values,
"imported_sec": sec_data_by_state["imported"].values,
"apply_alpha_sec": apply_alpha_sec_wave,
"N_third": df3X.loc[df3X.state == "NSW"].shape[0],
"j_third": len(third_states),
"Reff_third": third_data_by_state["mean"].values,
"Reff_omicron": third_data_by_state["mean_omicron"].values,
"mob_third": third_mobility_by_state,
"mob_third_std": third_mobility_std_by_state,
"sigma2_third": third_data_by_state["std"].values ** 2,
"sigma2_omicron": third_data_by_state["std_omicron"].values ** 2,
"policy_third": policy_third_wave,
"local_third": third_data_by_state["local"].values,
"imported_third": third_data_by_state["imported"].values,
"count_md": count_by_state,
"respond_md": respond_by_state,
"count_md_sec": sec_count_by_state,
"respond_md_sec": sec_respond_by_state,
"count_md_third": third_count_by_state,
"respond_md_third": third_respond_by_state,
"count_masks": mask_wearing_count_by_state,
"respond_masks": mask_wearing_respond_by_state,
"count_masks_sec": sec_mask_wearing_count_by_state,
"respond_masks_sec": sec_mask_wearing_respond_by_state,
"count_masks_third": third_mask_wearing_count_by_state,
"respond_masks_third": third_mask_wearing_respond_by_state,
"map_to_state_index_first": [state_index[state] for state in first_states],
"map_to_state_index_sec": [state_index[state] for state in sec_states],
"map_to_state_index_third": [state_index[state] for state in third_states],
"total_N_p_sec": sum([sum(x) for x in include_in_sec_wave]).item(),
"total_N_p_third": sum([sum(x) for x in include_in_third_wave]).item(),
"include_in_first": include_in_first_wave,
"include_in_sec": include_in_sec_wave,
"include_in_third": include_in_third_wave,
"pos_starts_sec": np.cumsum([sum(x) for x in include_in_sec_wave]).astype(int).tolist(),
"pos_starts_third": np.cumsum(
[sum(x) for x in include_in_third_wave]
).astype(int).tolist(),
"ve_delta_data": delta_vaccination_by_state_array,
"ve_omicron_data": omicron_vaccination_by_state_array,
"omicron_start_day": omicron_start_day,
"omicron_only_day": omicron_only_day,
"include_in_omicron": include_in_omicron_wave,
"total_N_p_third_omicron": int(sum([sum(x) for x in include_in_omicron_wave]).item()),
"pos_starts_third_omicron": np.cumsum(
[sum(x) for x in include_in_omicron_wave]
).astype(int).tolist(),
'tau_vax_block_size': tau_vax_block_size,
'total_N_p_third_blocks': int(
sum([int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_third_wave])
),
'pos_starts_third_blocks': np.cumsum(
[int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_third_wave]
).astype(int),
'total_N_p_third_omicron_blocks': int(
sum([int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_omicron_wave])
),
'pos_starts_third_omicron_blocks': np.cumsum(
[int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_omicron_wave]
).astype(int),
"pop_size_array": pop_size_array,
"heterogeneity_start_day": heterogeneity_start_day,
"p_detect": p_detect,
}
    # dump the dictionary to a pickle file
with open("results/stan_input_data.pkl", "wb") as f:
pickle.dump(input_data, f)
return None
def run_stan(
data_date,
num_chains=4,
num_samples=1000,
num_warmup_samples=500,
max_treedepth=12,
):
"""
    Read the pickled Stan input data (results/stan_input_data.pkl) and run the Stan model.
"""
data_date = pd.to_datetime(data_date)
# read in the input data as a dictionary
with open("results/stan_input_data.pkl", "rb") as f:
input_data = pickle.load(f)
# make results and figs dir
figs_dir = (
"figs/stan_fit/stan_fit_"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
os.makedirs(figs_dir, exist_ok=True)
os.makedirs(results_dir, exist_ok=True)
# path to the stan model
# basic model with a switchover between Reffs
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover.stan"
# mixture model with basic susceptible depletion
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_gamma_mix.stan"
# model that has a switchover but incorporates a waning in infection acquired immunity
rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover_waning_infection.stan"
# model that incorporates a waning in infection acquired immunity but is coded as a mixture
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_gamma_mix_waning_infection.stan"
# model that has a switchover but incorporates a waning in infection acquired immunity
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover_waning_infection_single_md.stan"
# compile the stan model
model = CmdStanModel(stan_file=rho_model_gamma)
# obtain a posterior sample from the model conditioned on the data
fit = model.sample(
chains=num_chains,
iter_warmup=num_warmup_samples,
iter_sampling=num_samples,
data=input_data,
max_treedepth=max_treedepth,
refresh=10
)
# display convergence diagnostics for the current run
print("===========")
print(fit.diagnose())
print("===========")
    # save the per-chain output CSV files to the results directory
fit.save_csvfiles(dir=results_dir)
df_fit = fit.draws_pd()
df_fit.to_csv(
results_dir
+ "posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# output a set of diagnostics
filename = (
figs_dir
+ "fit_summary_all_parameters"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# save a summary file for all parameters; this involves ESS and ESS/s as well as summary stats
fit_summary = fit.summary()
fit_summary.to_csv(filename)
# now save a small summary to easily view key parameters
pars_of_interest = ["bet[" + str(i + 1) + "]" for i in range(5)]
pars_of_interest = pars_of_interest + ["R_Li[" + str(i + 1) + "]" for i in range(8)]
pars_of_interest = pars_of_interest + [
"R_I",
"R_L",
"theta_md",
"theta_masks",
"sig",
"voc_effect_alpha",
"voc_effect_delta",
"voc_effect_omicron",
]
pars_of_interest = pars_of_interest + [
col for col in df_fit if "phi" in col and "simplex" not in col
]
    # save a smaller summary of the main parameters for ease of viewing
filename = (
figs_dir
+ "fit_summary_main_parameters"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
fit_summary.loc[pars_of_interest].to_csv(filename)
return None
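# Illustrative end-to-end sketch (the date is a placeholder): the three stages in
# this file are intended to run in sequence for a given data date.
#
#   data_date_demo = "2022-02-01"
#   get_data_for_posterior(data_date=data_date_demo)        # writes results/stan_input_data.pkl
#   run_stan(data_date=data_date_demo, num_chains=4,
#            num_samples=1000, num_warmup_samples=500)       # samples and saves the posterior
#   plot_and_save_posterior_samples(data_date=data_date_demo)  # diagnostics and figures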
def plot_and_save_posterior_samples(data_date):
"""
Runs the full suite of plotting.
"""
data_date = pd.to_datetime(data_date) # Define data date
figs_dir = (
"figs/stan_fit/stan_fit_"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# read in the posterior sample
samples_mov_gamma = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# * Note: 2020-09-09 won't work (for some reason)
######### Read in microdistancing (md) surveys #########
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# If you get an error here saying 'cannot create a new series when the index is not unique',
# then you have a duplicated md file.
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always = always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
always = always.fillna(method="ffill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
survey_counts_base = (
pd.pivot_table(data=always, index="date", columns="state", values="count")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
survey_respond_base = (
pd.pivot_table(data=always, index="date", columns="state", values="respondents")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest Mask wearing survey is {}".format(mask_wearing.date.values[-1]))
mask_wearing["state"] = (
mask_wearing["state"].map(states_initials).fillna(mask_wearing["state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
mask_wearing_always = mask_wearing_always.fillna(method="ffill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_counts_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="count"
).astype(int)
mask_wearing_respond_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="respondents"
).astype(int)
df_Reff = pd.read_csv(
"results/EpyReff/Reff_delta" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff["date"] = df_Reff.INFECTION_DATES
df_Reff["state"] = df_Reff.STATE
df_Reff_omicron = pd.read_csv(
"results/EpyReff/Reff_omicron" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff_omicron["date"] = df_Reff_omicron.INFECTION_DATES
df_Reff_omicron["state"] = df_Reff_omicron.STATE
# relabel some of the columns to avoid replication in the merged dataframe
col_names_replace = {
"mean": "mean_omicron",
"lower": "lower_omicron",
"upper": "upper_omicron",
"top": "top_omicron",
"bottom": "bottom_omicron",
"std": "std_omicron",
}
df_Reff_omicron.rename(col_names_replace, axis=1, inplace=True)
# read in NNDSS/linelist data
# If this errors it may be missing a leading zero on the date.
df_state = read_in_cases(
case_file_date=data_date.strftime("%d%b%Y"),
apply_delay_at_read=True,
apply_inc_at_read=True,
)
df_Reff = df_Reff.merge(
df_state,
how="left",
left_on=["state", "date"],
right_on=["STATE", "date_inferred"],
) # how = left to use Reff days, NNDSS missing dates
# merge in the omicron stuff
df_Reff = df_Reff.merge(
df_Reff_omicron,
how="left",
left_on=["state", "date"],
right_on=["state", "date"],
)
df_Reff["rho_moving"] = df_Reff.groupby(["state"])["rho"].transform(
lambda x: x.rolling(7, 1).mean()
    )  # 7-day rolling mean with min_periods=1
# some days have no cases, so need to fillna
df_Reff["rho_moving"] = df_Reff.rho_moving.fillna(method="bfill")
# counts are already aligned with infection date by subtracting a random incubation period
df_Reff["local"] = df_Reff.local.fillna(0)
df_Reff["imported"] = df_Reff.imported.fillna(0)
######### Read in Google mobility results #########
sys.path.insert(0, "../")
df_google = read_in_google(moving=True)
df = df_google.merge(
df_Reff[
[
"date",
"state",
"mean",
"lower",
"upper",
"top",
"bottom",
"std",
"mean_omicron",
"lower_omicron",
"upper_omicron",
"top_omicron",
"bottom_omicron",
"std_omicron",
"rho",
"rho_moving",
"local",
"imported",
]
],
on=["date", "state"],
how="inner",
)
    # ACT and NT are not in the original estimates and need to be extrapolated;
    # sorting keeps the state order consistent with the sort in data_by_state
# Note that as we now consider the third wave for ACT, we include it in the third
# wave fitting only!
states_to_fit_all_waves = sorted(
["NSW", "VIC", "QLD", "SA", "WA", "TAS", "ACT", "NT"]
)
first_states = sorted(["NSW", "VIC", "QLD", "SA", "WA", "TAS"])
fit_post_March = True
ban = "2020-03-20"
first_end_date = "2020-03-31"
# data for the first wave
first_date_range = {
"NSW": pd.date_range(start="2020-03-01", end=first_end_date).values,
"QLD": pd.date_range(start="2020-03-01", end=first_end_date).values,
"SA": pd.date_range(start="2020-03-01", end=first_end_date).values,
"TAS": pd.date_range(start="2020-03-01", end=first_end_date).values,
"VIC": | pd.date_range(start="2020-03-01", end=first_end_date) | pandas.date_range |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.integrate
import scipy.special
import collections
import fisx
import logging
from contextlib import contextmanager
from ..utils import instance
from ..utils import cache
from ..utils import listtools
from ..math import fit1d
from ..math.utils import weightedsum
from . import xrayspectrum
from ..simulation.classfactory import with_metaclass
from ..simulation import xrmc
from ..simulation import xmimsim
from ..math import noisepropagation
from . import pymca
from . import element
from ..materials import compoundfromdb
from ..materials import mixture
from ..materials import types
from ..utils.copyable import Copyable
from .utils import reshape_spectrum_lines
from ..io import localfs
from ..io import spe
logger = logging.getLogger(__name__)
class Layer(Copyable):
def __init__(self, material=None, thickness=None, fixed=False, parent=None):
"""
Args:
material(compound|mixture|str): material composition
thickness(num): thickness in cm
fixed(bool): thickness and composition are fixed
parent(Multilayer): part of this ensemble
"""
if instance.isstring(material):
ret = compoundfromdb.factory(material)
if ret is None:
raise RuntimeError("Invalid material {}".format(material))
material = ret
self.material = material
self.thickness = thickness
self.fixed = fixed
self.parent = parent
def __getstate__(self):
return {
"material": self.material,
"thickness": self.thickness,
"fixed": self.fixed,
}
def __setstate__(self, state):
self.material = state["material"]
self.thickness = state["thickness"]
self.fixed = state["fixed"]
def __eq__(self, other):
if isinstance(other, self.__class__):
return (
self.material == other.material
and self.thickness == other.thickness
and self.fixed == other.fixed
)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __getattr__(self, attr):
return getattr(self.material, attr)
def __str__(self):
return "{} um ({})".format(self.thickness * 1e4, self.material)
@property
def xraythicknessin(self):
return self.thickness / self.parent.geometry.cosnormin
@xraythicknessin.setter
def xraythicknessin(self, value):
self.thickness = value * self.parent.geometry.cosnormin
@property
def xraythicknessout(self):
return self.thickness / self.parent.geometry.cosnormout
@xraythicknessout.setter
def xraythicknessout(self, value):
self.thickness = value * self.parent.geometry.cosnormout
def absorbance(self, energy, weights=None, out=False, **kwargs):
kwargs.pop("decomposed", None)
if out:
thickness = self.xraythicknessout
else:
thickness = self.xraythicknessin
return self.material.absorbance(energy, thickness, weights=weights, **kwargs)
def addtofisx(self, setup, cfg):
name = cfg.addtofisx_material(self.material)
return [name, self.density, self.thickness]
def fisxgroups(self, emin=0, emax=np.inf):
return self.material.fisxgroups(emin=emin, emax=emax)
def arealdensity(self):
wfrac = self.material.elemental_massfractions()
m = self.density * self.thickness
return dict(zip(wfrac.keys(), np.asarray(list(wfrac.values())) * m))
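# Illustrative construction sketch for the Multilayer class defined below (material
# names, thicknesses and the geometry object are assumptions; string materials must
# resolve in the compound database used by compoundfromdb.factory):
#
#   ml = Multilayer(material=["hematite", "calcite"],  # ordered top to bottom
#                   thickness=[5e-4, 10e-4],           # cm
#                   geometry=some_centric_geometry)
#   ml.arealdensity()  # elemental areal densities in g/cm^2, summed over layers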
class Multilayer(with_metaclass((Copyable, cache.Cache))):
"""
Class representing a multilayer of compounds or mixtures
"""
FISXCFG = pymca.FisxConfig()
def __init__(
self, material=None, thickness=None, fixed=False, geometry=None, name=None
):
"""
Args:
material(list(spectrocrunch.materials.compound|mixture)): layer composition
thickness(list(num)): layer thickness in cm
fixed(list(num)): do not change this layer
geometry(spectrocrunch.geometries.base.Centric):
"""
self.geometry = geometry
if not instance.isarray(material):
material = [material]
if not instance.isarray(thickness):
thickness = [thickness]
if not instance.isarray(fixed):
fixed = [fixed]
if len(fixed) != len(material) and len(fixed) == 1:
fixed = fixed * len(material)
self.layers = [
Layer(material=mat, thickness=t, fixed=f, parent=self)
for mat, t, f in zip(material, thickness, fixed)
]
if not name:
name = "MULTILAYER"
self.name = name
super(Multilayer, self).__init__(force=True)
def __getstate__(self):
return {"layers": self.layers, "geometry": self.geometry}
def __setstate__(self, state):
self.layers = state["layers"]
for layer in self.layers:
layer.parent = self
self.geometry = state["geometry"]
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.layers == other.layers and self.geometry == other.geometry
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.layers)
def __getitem__(self, index):
return self.layers[index]
@property
def nlayers(self):
return len(self.layers)
def fixediter(self):
for layer in self:
if layer.fixed:
yield layer
def freeiter(self):
for layer in self:
if not layer.fixed:
yield layer
def __str__(self):
layers = "\n ".join(
"Layer {}. {}".format(i, str(layer)) for i, layer in enumerate(self)
)
return "Multilayer (ordered top-bottom):\n {}".format(layers)
def markscatterer(self, name):
for layer in self:
layer.markscatterer(name)
def ummarkscatterer(self):
for layer in self:
layer.ummarkscatterer()
@property
def density(self):
return np.vectorize(lambda layer: layer.density)(self)
@property
def thickness(self):
return np.vectorize(lambda layer: layer.thickness)(self)
@property
def xraythicknessin(self):
return np.vectorize(lambda layer: layer.xraythicknessin)(self)
@property
def xraythicknessout(self):
        return np.vectorize(lambda layer: layer.xraythicknessout)(self)
def arealdensity(self):
ret = collections.Counter()
for layer in self:
ret.update(layer.arealdensity())
return dict(ret)
def elemental_massfractions(self):
ret = self.arealdensity()
s = sum(ret.values())
return {el: w / s for el, w in ret.items()}
def change_elemental_massfraction(self, Z, wZ):
# wZ * sum_li(w_il*rho_il*t_il) = sum_l(w_Zl*rho_Zl*t_zl)
# a: layers that contain Z
# b: layers that do not contain Z
# wZ * sum_ai(w_ia*rho_a*t_a) + wZ * sum_bi(w_ib*rho_b*t_b) = sum_a(w_Za*rho_a*t_a) + sum_b(w_Zb*rho_b*t_b)
#
# t_A = sum_a(t_a)
# t_B = sum_b(t_b)
# t_a = t_A*r_a
# t_b = t_B*r_b = t*r_b - t_A*r_b
# t_B = t - t_A
#
# denom = + wZ * sum_ai(w_ia*rho_a*r_a) - sum_a(w_Za*rho_a*r_a)
# - wZ * sum_bi(w_ib*rho_b*r_b) + sum_b(w_Zb*rho_b*r_b)
# num = t * sum_b(w_Zb*rho_b*r_b) - wZ*t * sum_bi(w_ib*rho_b*r_b)
# t_A = num/denom
#
# w_Zb = 0
#
# num = t * wZ * sum_bi(w_ib*rho_b*r_b)
# denom = sum_a(w_Za*rho_a*r_a) - wZ * [sum_bi(w_ib*rho_b*r_b) - sum_ai(w_ia*rho_a*r_a)]
pass
def elemental_molefractions(self):
return self.mixlayers().elemental_molefractions()
def elemental_equivalents(self):
return self.mixlayers().elemental_equivalents()
def mixlayers(self):
n = len(self)
if n == 0:
return None
elif n == 1:
return self[0].material
else:
vfrac = self.thickness
vfrac = vfrac / float(vfrac.sum())
materials = [layer.material for layer in self]
return mixture.Mixture(
materials, vfrac, types.fraction.volume, name=self.name
)
def mass_att_coeff(self, energy):
"""Total mass attenuation coefficient
Args:
energy(num|array): keV
Returns:
            array: nlayers x nenergy
"""
return np.asarray(
[instance.asarray(layer.mass_att_coeff(energy)) for layer in self]
)
def markabsorber(self, symb, shells=[], fluolines=[]):
"""
Args:
symb(str): element symbol
"""
for layer in self:
layer.markabsorber(symb, shells=shells, fluolines=fluolines)
def unmarkabsorber(self):
for layer in self:
layer.unmarkabsorber()
def absorbance(self, energy, weights=None, out=False, fine=False, decomposed=False):
if decomposed:
return [
layer.absorbance(energy, weights=weights, out=out, fine=fine)
for layer in self
]
else:
return np.sum(
[
layer.absorbance(energy, weights=weights, out=out, fine=fine)
for layer in self
],
axis=0,
)
def transmission(
self, energy, weights=None, out=False, fine=False, decomposed=False
):
A = self.absorbance(
energy, weights=weights, out=out, fine=fine, decomposed=decomposed
)
if decomposed:
return A # TODO: apply recursively
else:
return np.exp(-A)
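    # Illustrative sketch (energies and the instance `ml` are assumptions): the stack
    # transmission is exp(-A), with A the per-layer absorbances summed above.
    #
    #   energies = np.array([5.0, 10.0, 20.0])  # keV
    #   T = ml.transmission(energies)            # equals np.exp(-ml.absorbance(energies))
    #   A_layers = ml.absorbance(energies, decomposed=True)  # one absorbance per layer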
def fixlayers(self, ind=None):
if ind is None:
for layer in self:
layer.fixed = True
else:
for i in ind:
self[i].fixed = True
def freelayers(self, ind=None):
if ind is None:
for layer in self:
layer.fixed = False
else:
for i in ind:
self[i].fixed = False
def _refine_linear(self, A, y, constant=False, constraint=True):
y = instance.asarray(y)
if y.size == 1 and len(A) == 1:
return y / A[0]
if constant:
A.append(np.ones_like(y))
A = np.vstack(A).T
if constraint:
lb = np.zeros(len(A), dtype=float)
lb[-1] = -np.inf
ub = np.inf
params = fit1d.lstsq_bound(A, y, lb, ub)
else:
params = fit1d.lstsq(A, y)
params = params[:-1]
else:
A = np.vstack(A).T
if constraint:
params = fit1d.lstsq_nonnegative(A, y)
else:
params = fit1d.lstsq(A, y)
return params
def _refinerhod(
self, energy, absorbance, refinedattr, fixedattr, weights=None, **kwargs
):
y = absorbance
for layer in self.fixediter():
y = y - layer.absorbance(energy)
A = [layer.mass_att_coeff(energy) for layer in self.freeiter()]
if weights is not None:
A = [weightedsum(csi, weights=weights) for csi in A]
params = self._refine_linear(A, y, **kwargs)
for param, layer in zip(params, self.freeiter()):
setattr(layer, refinedattr, param / getattr(layer, fixedattr))
logger.info(
'Refined {} of "{}": {}'.format(
refinedattr, layer, getattr(layer, refinedattr)
)
)
def refinecomposition(
self, energy, absorbance, weights=None, fixthickness=True, **kwargs
):
y = absorbance
for layer in self.fixediter():
y = y - layer.absorbance(energy, weights=weights)
A = []
for layer in self.freeiter():
mu = layer.mass_att_coeff(energy, decomposed=True)
w, cs = layer.csdict_parse(mu)
if weights is not None:
cs = [weightedsum(csi, weights=weights) for csi in cs]
A.extend(cs)
params = self._refine_linear(A, y, **kwargs)
for layer in self.freeiter():
n = layer.nparts
w = params[0:n]
params = params[n:]
s = w.sum()
w = w / s
w = dict(zip(layer.parts.keys(), w))
layer.change_fractions(w, "mass")
if fixthickness:
layer.density = s / layer.xraythicknessin
logger.info(
'Refined density of "{}": {} g/cm^3'.format(layer, layer.density)
)
else:
layer.xraythicknessin = s / layer.density
logger.info(
                    'Refined thickness of "{}": {} cm'.format(
layer, layer.xraythicknessin
)
)
def refinethickness(self, energy, absorbance, **kwargs):
self._refinerhod(energy, absorbance, "xraythicknessin", "density", **kwargs)
def refinedensity(self, energy, absorbance, **kwargs):
self._refinerhod(energy, absorbance, "density", "xraythicknessin", **kwargs)
def _cache_layerinfo(self):
t = np.empty(self.nlayers + 1)
np.cumsum(self.thickness, out=t[1:])
t[0] = 0
if self.geometry.reflection:
zexit = 0.0
else:
zexit = t[-1]
return {"cumul_thickness": t, "zexit": zexit}
def _zlayer(self, z):
"""Get layer in which z falls
Args:
z(num|array): depth
Returns:
num|array:
0 when z<=0
n+1 when z>totalthickness
{1,...,n} otherwise (the layers)
"""
layerinfo = self.getcache("layerinfo")
ret = np.digitize(z, layerinfo["cumul_thickness"], right=True)
return instance.asscalar(ret)
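    # Worked example of the convention above (hypothetical numbers, not taken from any
    # dataset): with cumul_thickness = [0., 1e-4, 3e-4] (two layers of 1 and 2 micron),
    # np.digitize(z, cumul_thickness, right=True) returns
    #   0 for z = -1e-5  (above the sample surface)
    #   1 for z = 5e-5   (inside the first layer)
    #   2 for z = 2e-4   (inside the second layer)
    #   3 for z = 4e-4   (beyond the total thickness)
    # i.e. exactly the 0 / {1,...,n} / n+1 convention documented in _zlayer.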
def _cache_attenuationinfo(self, energy):
energy = np.unique(instance.asarray(energy))
nenergies = len(energy)
density = self.density[:, np.newaxis]
thickness = self.thickness[:, np.newaxis]
mu = self.mass_att_coeff(energy)
# We will add one layer at the beginning and one at the end, both vacuum
# linear attenuation coefficient for each layer
linatt = mu * density
linattout = np.empty((self.nlayers + 2, nenergies), dtype=linatt.dtype)
linattout[1:-1, :] = linatt
linattout[[0, -1], :] = 0 # outside sample (vacuum)
# Cumulative linear attenuation coefficient (= linatt*z + correction)
attall = (linatt * thickness).sum(axis=0)
cor = np.empty((self.nlayers + 2, nenergies), dtype=attall.dtype)
cor[0, :] = 0 # before sample (vacuum)
cor[-1, :] = attall # after sample
for i in range(nenergies):
tmp = np.subtract.outer(linatt[:, i], linatt[:, i])
tmp *= thickness
cor[1:-1, i] = np.triu(tmp).sum(axis=0)
linattout = pd.DataFrame(
linattout, columns=energy, index=range(self.nlayers + 2)
)
cor = pd.DataFrame(cor, columns=energy, index=range(self.nlayers + 2))
return {"linatt": linattout, "linatt_cumulcor": cor}
def _cum_attenuation(self, z, energy):
"""Total attenuation from surface to z
Args:
z(num|array): depth of attenuation
            energy(num|array): energies at which the attenuation is evaluated
Returns:
array: nz x nenergy
"""
lz = self._zlayer(z)
att = self.getcache("attenuationinfo")
linatt = att["linatt"].loc[lz][energy]
cor = att["linatt_cumulcor"].loc[lz][energy]
if linatt.ndim != 0:
linatt = linatt.values
cor = cor.values
if linatt.ndim == 2:
z = z[:, np.newaxis]
return z * linatt + cor
def _transmission(self, zi, zj, cosaij, energy):
"""Transmission from depth zi to zj
Args:
zi(num|array): start depth of attenuation (nz)
zj(num|array): end depth of attenuation (nz)
cosaij(num|array): angle with surface normal (nz)
            energy(num|array): energies at which the attenuation is evaluated (nenergy)
Returns:
array: nz x nenergy
"""
datt = self._cum_attenuation(zj, energy) - self._cum_attenuation(zi, energy)
if datt.ndim == 2:
if instance.isarray(cosaij):
cosaij = cosaij[:, np.newaxis]
# assert(sum(instance.asarray(-datt/cosaij)>0)==0)
return np.exp(-datt / cosaij)
def _cache_interactioninfo(
self, energy, emin=None, emax=None, ninteractions=None, geomkwargs=None
):
"""
Args:
energy(array): nSource x nSourceLines
"""
def getenergy(x, **kwargs):
return list(listtools.flatten(line.energy(**kwargs) for line in x.columns))
# probabilities: list of pandas dataframes (one for each interaction)
# which saves the interaction probability of a layer
# at a particular energy
# column: line as a result of an interaction
# index: [layer_index, energy_index]
# value: interaction probability (1/cm/srad)
# energy_to_index: list of functions (one for each interaction)
# to get the energy_index closest to an energy
_nlayers = self.nlayers + 2
_ninteractions = ninteractions + 2
probabilities = [None] * _ninteractions
energy_to_index = [None] * _ninteractions
interactioninfo = {
"probabilities": probabilities,
"energy_to_index": energy_to_index,
"getenergy": getenergy,
}
# Interaction 0 has no probabilities
# this is the source, not the result of an interaction
source = [xrayspectrum.RayleighLine(energy)]
probabilities[0] = pd.DataFrame(columns=source)
# Calculate interaction probabilities (ph/cm/srad)
for i in range(ninteractions):
# Line energies after previous interaction
energyi = getenergy(probabilities[i], **geomkwargs)
nenergyi = len(energyi)
# Interaction probabilities of each energy with each layer
probs = [None] * _nlayers
probs[1:-1] = [
pd.DataFrame.from_dict(
dict(
layer.xrayspectrum(energyi, emin=emin, emax=emax).probabilities
)
)
for layer in self
]
probs[0] = pd.DataFrame(index=range(nenergyi))
probs[-1] = probs[0]
probs = | pd.concat(probs, sort=True) | pandas.concat |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from dataclasses import dataclass
from typing import List, Tuple, Dict, Optional
from datetime import date
from datetime import timedelta
from pathlib import Path
import click
import pandas as pd
import numpy as np
from scipy.stats import poisson
# %%
@dataclass
class Department:
""" Define the properties of a department"""
name: str
employees: float
intracomm: float
intercomm: float
friendscomm: float
@dataclass
class Channel:
""" Define baseline properties of different communication media """
name: str
freq: int
@dataclass
class Employee:
""" Communication behavior of employee"""
idx: int
department: str
remote: bool
fulltime: bool
communicative: float
channel_baseline: List[int]
intracomm: float
intercomm: float
friendscomm: float
friends: List[int]
hasCommunicationIssues: bool
@dataclass
class Communication:
""" Defines a single communication (row in the communication graph) """
sender: int
recver: int
channel: str
interactions: int
date: str
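# A minimal example of the configuration dict consumed by generate_behavior below.
# This is a sketch only: the key names are inferred from how cfg is used in this
# script and the numbers are made up for illustration.
_EXAMPLE_CFG = {
    'employees': 100,                      # total head count
    'ratio_of_remote_employees': 0.3,
    'ratio_of_fulltime_employees': 0.8,
    'departments': {
        'sales': Department(name='sales', employees=0.5, intracomm=0.6, intercomm=0.2, friendscomm=0.2),
        'tech': Department(name='tech', employees=0.5, intracomm=0.7, intercomm=0.1, friendscomm=0.2),
    },
    'channels': {
        'mail': Channel(name='mail', freq=10),
        'chat': Channel(name='chat', freq=25),
    },
}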
def generate_behavior(cfg: Dict, seed: Optional[int] = None, permutation_rate: float = 0.1) -> Tuple[Dict, Dict]:
"""
For given configuration generate preferences and behavior for each employee
"""
np.random.seed(seed)
stats = {}
stats['employees_per_department'] = [int(dep.employees * cfg['employees']) for dep in cfg['departments'].values()]
# Number of employees per department
stats['department_per_employee'] = []
_ = [stats['department_per_employee'].extend([dep.name,] * cnt) for cnt, dep in zip(stats['employees_per_department'], cfg['departments'].values())]
# Select a number of other employees as friends (frequent communication patners)
# Let it be a normal distribution around 5, with a standard deviation of 2 and a minimal value of 1
total_employee_cnt = sum(stats['employees_per_department'])
employee_friends = [np.random.choice(range(total_employee_cnt), size=max(1, int(nfriends))) for nfriends in np.round(np.random.normal(loc=5, scale=2, size=total_employee_cnt), decimals=0)]
stats['remote'] = [True if p < cfg['ratio_of_remote_employees'] else False for p in np.random.uniform(low=0.0, high=1.0, size=total_employee_cnt)]
stats['fulltime'] = [True if p < cfg['ratio_of_fulltime_employees'] else False for p in np.random.uniform(low=0.0, high=1.0, size=total_employee_cnt)]
# min_comm_behavior = 0.1
stats['base_comm_behavior'] = [max(1, int(x)) for x in np.random.normal(loc=10, scale=3, size=total_employee_cnt)]
    # A dictionary of employee_idx per department
stats['departmentlist'] = {dep_name: [] for dep_name in cfg['departments'].keys()}
stats['hasCommunicationIssues'] = np.random.choice([True, False], p=[permutation_rate, 1.0 - permutation_rate], size=total_employee_cnt)
# Build a dictionary of employees, indexed by their idx
employees = {}
for i, (department, base_comm, remote, fulltime, friends, hasCI) in enumerate(
zip(stats['department_per_employee'], stats['base_comm_behavior'], stats['remote'], stats['fulltime'], employee_friends, stats['hasCommunicationIssues'])):
        # employee idx is simply the enumeration position (starting at 0)
        idx = i
# If not working fulltime, reduce comm
if fulltime:
worktime = 1.0
else:
worktime = 0.5
# Remote workers comm a bit more
if remote:
remotetime = 1.5
else:
remotetime = 1.0
# define how many different interactions (i.e. node degree) a employee has per day
communicative = int(base_comm*worktime*remotetime)
# Simple perturbation. This employee will have very few interactions
if hasCI:
communicative = 3
# define the intensity of those interactions per channel
# media.freq * employee_communication_tendency * media_bias
channel_comm_freq = [cfg['channels'][m].freq*media_bias
for m, media_bias in zip(cfg['channels'], np.random.uniform(low=0.5, high=1.5, size=len(cfg['channels'])))]
channel_comm_freq = np.int_(np.round(channel_comm_freq, decimals=0))
intracomm = cfg['departments'][department].intracomm * float(np.random.uniform(low=0.5, high=1.5, size=1))
intercomm = cfg['departments'][department].intercomm * float(np.random.uniform(low=0.5, high=1.5, size=1))
friendscomm = cfg['departments'][department].friendscomm * float(np.random.uniform(low=0.5, high=1.5, size=1))
comm_summ = intracomm + intercomm + friendscomm
intracomm *= 1.0/comm_summ
intercomm *= 1.0/comm_summ
friendscomm *= 1.0/comm_summ
employees[idx]=Employee(idx=idx,
department=department,
remote=remote,
fulltime=fulltime,
communicative=communicative,
channel_baseline=channel_comm_freq,
intracomm = intracomm,
intercomm = intercomm,
friendscomm = friendscomm,
friends=friends,
hasCommunicationIssues=hasCI)
stats['departmentlist'][department].append(idx)
return stats['departmentlist'], employees
def _getIdx(employee: Employee, groups: List[int], departmentlist: Dict[str, int]) -> List[int]:
"""
For given employee and groups (department, other department, friends) return matching employee idx
"""
idxs = []
all_employees = []
_ = [all_employees.extend(x) for x in departmentlist.values()]
for group in groups:
if group == 0:
# same department
idx = np.random.choice(departmentlist[employee.department])
elif group == 1:
# Other departments
idx = np.random.choice(all_employees)
elif group == 2:
# Friends
idx = np.random.choice(employee.friends)
else:
raise Exception('Invalid group id')
idxs.append(idx)
return idxs
def get_communication_for_employee(employee: Employee, date: str, departmentlist: Dict[str, int], channels: List[str]) -> List[Communication]:
"""
For given employee create all communication events for given day
"""
degree = np.random.poisson(employee.communicative)
groups = np.random.choice([0, 1, 2], p=[employee.intracomm, employee.intercomm, employee.friendscomm], size=degree)
dests = _getIdx(employee, groups, departmentlist)
chls = np.random.choice(range(len(channels)), size=degree)
channel_names = [channels[x] for x in chls]
intensities = [np.random.poisson(employee.channel_baseline[channel]) for channel in chls]
comms = []
for dest, channel, intensity in zip(dests, channel_names, intensities):
comms.append(
Communication(
sender=employee.idx,
recver=dest,
channel=channel,
interactions=intensity,
date=date
)
)
return comms
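# Example driver (a sketch only: it assumes a cfg dict shaped like _EXAMPLE_CFG above
# and an arbitrary one-week simulation window):
#
#   departmentlist, employees = generate_behavior(cfg, seed=42)
#   comms = []
#   for offset in range(5):
#       day = (date(2021, 1, 4) + timedelta(days=offset)).isoformat()
#       for employee in employees.values():
#           comms.extend(get_communication_for_employee(
#               employee, day, departmentlist, list(cfg['channels'].keys())))
#   store_results('.', comms, list(employees.values()))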
def store_results(path: str, comms: List[Communication], employees: List[Employee], prefix: Optional[str] = ''):
"""
Store communication and employees as csv files
"""
commpath = Path(path).joinpath(prefix+'communication.csv')
employeepath = Path(path).joinpath(prefix+'employees.csv')
commDF = | pd.DataFrame(comms) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Common utilities for univariate analysis, mainly covering:
1. Automatic binning (cardinality reduction): chi-square binning and Best-KS binning
2. Basic analysis: univariate metrics, a WOE encoding tool, and an analysis report over all variables
3. Plotting helpers for univariate analysis, e.g. AUC, KS and distribution plots
"""
# Author: <NAME>
import numpy as np
import pandas as pd
from abc import abstractmethod
from abc import ABCMeta
from sklearn.utils.multiclass import type_of_target
from pandas.api.types import is_numeric_dtype
import warnings
import time
from openpyxl import Workbook
from openpyxl.utils.dataframe import dataframe_to_rows
class Kit(object):
"""
常用工具类
"""
def __init__(self, positive_label=1, negative_label=0):
self.positive_label = positive_label
self.negative_label = negative_label
pass
def cond_insert_ind(self, cond, ind):
"""
为分箱结果中添加需要独立分箱的部分
cond : 待处理的分箱结果
arr : 需要独立分箱的数据集合,不管是数值型还是非数值型变量cond,请使用list添加
"""
if isinstance(cond, list):
cond = list(set(cond + ind))
cond.sort()
else:
n = len(ind)
arr = list(set(ind).difference(set(cond.keys())))
for k, v in cond.items():
cond[k] = v + n
for i in range(len(arr)):
cond[arr[i]] = i
return cond
def make_bin(self, df, var_name, cond, precision=3):
"""
基于cond中的分箱条件,为df中var_name的变量匹配对应的分箱值
"""
if isinstance(cond, list):
df["bin"] = pd.cut(df[var_name], cond, duplicates='drop', precision=precision)
elif isinstance(cond, dict):
mapping = pd.Series(cond).reset_index().rename({"index": var_name, 0: "bin"}, axis=1)
df = df[[var_name]].merge(mapping, on=var_name, how='left').set_index(df[[var_name]].index)
else:
raise ValueError("参数cond的类型只能为list或者dict")
return df["bin"]
def woe_code(self, df, var_name, woeDict):
"""
对样本的数据进行woe编码,返回完成编码后的
"""
if isinstance(df[var_name].dtype, pd.core.dtypes.dtypes.CategoricalDtype):
mapping = pd.Series(woeDict).reset_index().rename({"index": var_name, 0: "woe"}, axis=1)
breaks = mapping[var_name].to_list()
breaks.insert(0, -np.inf)
mapping[var_name] = pd.cut(mapping[var_name], breaks, duplicates="drop")
else:
mapping = pd.Series(woeDict).reset_index().rename({"index": var_name, 0: "woe"}, axis=1)
df = df.merge(mapping, on=var_name, how='left').set_index(df.index)
return df["woe"]
def univerate(self, df, var_name, target, lamb=0.001, retWoeDict=False):
"""
单变量分析函数,目前支持的计算指标为 IV,KS,LIFT
建议用于编码后的数值型变量进行分析,若在前面使用了cond_insert方法调整了cond
"""
# dti = pd.crosstab(df[var_name], df[target])
dti = df.groupby([var_name, target])[target].count().unstack().fillna(0)
dti.rename({self.positive_label: "positive", self.negative_label: "negative"}, axis=1, inplace=True)
dti["positive"] = dti["positive"].astype(int)
dti["negative"] = dti["negative"].astype(int)
p_t = dti["positive"].sum()
n_t = dti["negative"].sum()
t_t = p_t + n_t
r_t = p_t / t_t
dti["total"] = dti["positive"] + dti["negative"]
dti["total_rate"] = dti["total"] / t_t
dti["positive_rate"] = dti["positive"] / dti["total"] # (rs["positive"] + rs["negative"])
dti["negative_cum"] = dti["negative"].cumsum()
dti["positive_cum"] = dti["positive"].cumsum()
dti["woe"] = np.log(((dti["negative"] / n_t) + lamb) / ((dti["positive"] / p_t) + lamb))
dti["LIFT"] = dti["positive_rate"] / r_t
dti["KS"] = np.abs((dti["positive_cum"] / p_t) - (dti["negative_cum"] / n_t))
dti["IV"] = (dti["negative"] / n_t - dti["positive"] / p_t) * dti['woe']
IV = dti["IV"].sum()
KS = dti["KS"].max()
dti["IV"] = IV
dti["KS"] = KS
dti = dti.reset_index()
dti.columns.name = None
dti.rename({"Total": "num", var_name: "bin"}, axis=1, inplace=True)
dti.insert(0, "target", [target] * dti.shape[0])
dti.insert(0, "var", [var_name] * dti.shape[0])
if retWoeDict:
if isinstance(dti["bin"].dtype, pd.core.dtypes.dtypes.CategoricalDtype):
dti["v"] = dti["bin"].map(lambda x: x.right)
else:
dti["v"] = dti["bin"]
woeDict = pd.Series(dti["woe"].values, index=dti["v"].values).to_dict()
# # 修正根据分箱后,空分组,对应的woe值
# if cond0 is not None:
# right0 = set(cond0[1:])
# right1 = set(woeDict.keys())
# for key in right0.difference(right1):
# woeDict[key] = 0
dti.drop(columns=["negative_cum", "positive_cum", "v"], inplace=True)
return dti, woeDict
dti.drop(columns=["negative_cum", "positive_cum"], inplace=True)
return dti
def is_numeric(self, series):
"""
判断变量是否为数值型变量
"""
return is_numeric_dtype(series)
def missing_count(self, series):
"""
计算变量缺失率
"""
missing_index = pd.isna(series)
return missing_index.sum()
def unique_count(self, series):
"""
计算变量的枚举值数量
"""
unique_arr = pd.unique(series)
return unique_arr.size
def csi(self, base, df, var_name):
"""
计算不同数据集之间,同一个变量csi
"""
count1 = base.groupby(var_name)[var_name].count()
count2 = df.groupby(var_name)[var_name].count()
t1 = count1.sum()
t2 = count2.sum()
c1 = count1 / t1
c2 = count2 / t2
csi = (c1 - c2) * np.log(c1 / c2)
return csi.sum()
def group_rs(self, data, group, sum_col=[], count_col=[], rate_tupes=[]):
"""
业务分析工具类,同时对比计算多个target指标,查看结果
data : 数据集
sum_col : 需要group_sum的列
count_col : 需要group_count的列
rate_tupe : 需要除法计算的列 格式为 (字段1,字段2,新列名称) 或者 (字段,新列名称)
"""
grouped = data.groupby(group)
grouped_count = grouped[count_col].count()
grouped_sum = grouped[sum_col].sum()
grouped = pd.concat([grouped_count, grouped_sum], axis=1)
for tup in rate_tupes:
size = len(tup)
if size == 3:
grouped[tup[2]] = grouped[tup[0]] / grouped[tup[1]]
if size == 2:
grouped[tup[1]] = grouped[tup[0]] / grouped[tup[0]].sum()
return grouped.reset_index()
def batch_fillna(self, df, var_list, num_fill=-1, cate_fill="NA", suffix="_new"):
"""
批量填充缺失值
"""
for var_name in var_list:
var_name_new = var_name + suffix
if self.is_numeric(df[var_name]):
df[var_name_new] = df[var_name].fillna(num_fill)
else:
df[var_name_new] = df[var_name].fillna(cate_fill)
return df
def varlist_suffix(self, var_list, suffix):
return [var_name + suffix for var_name in var_list]
def feature_engine(self, datas, var_list, target, discretize, max_bin=6, precision=4, num_fill=-1, cate_fill="NA",
num_ind=None, cate_ind=None, fill_suffix="_fill", bin_suffix="_bin", woe_suffix="_woe",
path=None):
"""
批量对数据集进行自动化分箱和编码
Parameters
----------
datas: 数据集,为dataframe的list,第一个数据集为训练集
var_list: 特征列表
target : 目标值
discretize : 分箱工具类
max_bin : 最大分箱数
num_fill : 数值型变量填充结果
cate_fill : 类别型变量填充结果
num_ind : 数值型变量中,需要独立插入的分箱 为 list
cate_ind : 字符型变量中,需要独立进行分箱的值 为 list
fill_suffix : 处理确实
bin_suffix : 分箱后生成对应分箱的后缀
woe_suffix : woe编码后的编码的后缀
retInfoDict : 返回分箱后的变量信息,采用嵌套的dict格式,单个变量的相关信息如下:
变量名 : { "cond" , "woeDict" }
"""
assert len(datas) >= 1, "至少需要一个数据集"
        train = datas[0]
info_dict = {}
for var_name in var_list:
print(f"开始处理变量:'{var_name}'")
missing = self.missing_count(train[var_name])
missing_rate = (missing * 1.0) / (train.shape[0])
unique = self.unique_count(train[var_name])
info_dict[var_name] = {}
info_dict[var_name]['missing'] = missing
info_dict[var_name]['missing_rate'] = missing_rate
info_dict[var_name]['unique'] = unique
is_numeric = self.is_numeric(train[var_name])
var_name_new = var_name
if is_numeric:
if num_fill is not None:
var_name_new = var_name + fill_suffix
for df in datas:
df[var_name_new] = df[var_name].fillna(num_fill)
cond = discretize.dsct(train, var_name, target, max_bin)
if num_ind is not None:
cond = self.cond_insert_ind(cond, num_ind)
type = 'numeric'
else:
if cate_fill is not None:
var_name_new = var_name + fill_suffix
for df in datas:
df[var_name_new] = df[var_name].fillna(cate_fill)
check = []
unique0 = set(train[var_name].unique())
for df in datas[1:]:
diff = unique0.difference(df[var_name].unique())
check.append(diff)
                if any(len(c) > 0 for c in check):
                    # category sets differ across the datasets: bin on the pooled data
                    all_data = pd.concat(datas, axis=0)
                    cond = discretize.dsct(all_data, var_name_new, target, max_bin)
                    type = 'complex'
else:
cond = discretize.dsct(train, var_name_new, target, max_bin)
type = 'category'
if cate_ind is not None:
cond = self.cond_insert_ind(cond, cate_ind)
info_dict[var_name]['type'] = type
info_dict[var_name]['cond'] = cond
var_name_bin = var_name + bin_suffix
for df in datas:
df[var_name_bin] = self.make_bin(df, var_name_new, cond, precision=precision)
dti, woeDict = self.univerate(train, var_name_bin, target, retWoeDict=True)
var_name_woe = var_name + woe_suffix
for df in datas:
df[var_name_woe] = self.woe_code(df, var_name_bin, woeDict)
info_dict[var_name]['dti'] = dti
info_dict[var_name]['woeDict'] = woeDict
ks = dti.loc[0, 'KS']
iv = dti.loc[0, 'IV']
info_dict[var_name]['ks'] = ks
info_dict[var_name]['iv'] = iv
if path is not None:
wb = Workbook()
ws1 = wb.active
ws1.title = '变量信息汇总'
ws2 = wb.create_sheet(title='分箱信息')
info = []
dtis = []
            for v in info_dict.keys():
                var_info = info_dict[v]
                info.append(
                    [v, var_info['type'], var_info['missing'], var_info['missing_rate'], var_info['unique'], var_info['ks'], var_info['iv']])
                dtis.append(var_info['dti'])
sheet1_data = pd.DataFrame(info,
columns=['var_name', 'type', 'missing', 'missing_rate', 'unique', 'ks', 'iv'])
            sheet2_data = pd.concat(dtis, axis=0)
sheet2_data['bin'] = sheet2_data['bin'].astype(str)
for r in dataframe_to_rows(sheet1_data):
ws1.append(r)
for r in dataframe_to_rows(sheet2_data):
ws2.append(r)
wb.save(path)
        return datas, info_dict
def re_bin_woe(self, datas, var_name, target, cond, bin_suffix="_", woe_suffix="_woe"):
"""
对相应的变量进行再分箱,并重新计算相应的woe,注意datas中的第一个数据集为用于计算woe的训练集
"""
class Discretize(metaclass=ABCMeta):
"""
离散化基类,包含了基本的参数定义和特征预处理的方法
注:分箱的预处理过程中就会剔除x变量中的缺失值,若要将缺失值也纳入分箱运算过程,请先在数据中进行填充
Parameters
----------
    init_thredhold: int 初始化分箱的数量,若为空则不进行初始化的分箱
    init_method : str 初始化方法默认为'qcut' , 初始化分箱方法,目前仅支持 'qcut' 和 'cut'
    print_process : bool 是否打印分箱的过程信息
positive_label : 正样本的定义,根据target的数据类型来定
negative_label : 负样本的定义,根据target的数据类型来定
num_fillna : 数字变量的缺失值填充,默认为None;若为None,在计算分箱的过程中会剔除缺失部分的数据
cate_fillna : 类别变量的缺失值填充,默认为None;若为None,在计算分箱的过程中会剔除缺失部分的数据
"""
def __init__(self, init_thredhold=100, init_method='qcut', print_process=False,
positive_label=1, negative_label=0, num_fillna=None, cate_fillna=None):
self.init_thredhold = init_thredhold
self.init_method = init_method
self.print_process = print_process
self.positive_label = positive_label
self.negative_label = negative_label
self.num_fillna = num_fillna
self.cate_fillna = cate_fillna
def _init_data(self, data, var_name, target, max_bin, precision):
"""
init the input:
1、校验自变量和因变量的类型
2、判断x是否为数值类型
3、初始化数据结果
"""
if self.print_process: print("开始对变量'{}'使用'{}'分箱:".format(var_name, self.__class__.__name__))
time0 = time.time()
assert var_name in data.columns, "数据中不包含变量%s,请检查数据" % (var_name)
assert var_name != target, "因变量和自变量必须是不同的变量"
data = data[[var_name, target]].copy()
self._y_check(data[data[target].notnull()][target])
is_numeric = is_numeric_dtype(data[var_name])
if is_numeric:
if self.num_fillna is not None:
data[var_name] = data[var_name].fillna(self.num_fillna)
data = data[(data[var_name].notnull()) & (data[target].notnull())]
if self.init_thredhold is not None:
if self.init_method == 'qcut':
data.loc[:, var_name] = pd.qcut(data[var_name], self.init_thredhold, duplicates="drop",
precision=precision)
elif self.init_method == 'cut':
data.loc[:, var_name] = pd.cut(data[var_name], self.init_thredhold, duplicates="drop",
precision=precision)
else:
raise ValueError("init_method参数仅有qcut和cut两个选项")
data.loc[:, var_name] = data[var_name].map(lambda x: x.right).astype(float)
dti = pd.crosstab(data[var_name], data[target], dropna=False).reset_index()
dti.rename({self.negative_label: "negative", self.positive_label: "positive"}, axis=1, inplace=True)
dti["variable"] = dti[var_name]
mapping = None
else:
if self.cate_fillna is not None:
data[var_name] = data[var_name].fillna(self.cate_fillna)
data = data[(data[var_name].notnull()) & (data[target].notnull())]
dti = pd.crosstab(data[var_name], data[target]).reset_index()
dti.rename({self.negative_label: "negative", self.positive_label: "positive"}, axis=1, inplace=True)
dti["positive_rate"] = dti["positive"] / (dti["positive"] + dti["negative"])
dti = dti.sort_values("positive_rate").reset_index(drop=True).reset_index()
dti.rename({"index": "variable"}, axis=1, inplace=True)
mapping = dti[[var_name, "variable", "negative", "positive"]]
dti = dti[["variable", "negative", "positive"]].copy()
if self.print_process:
time1 = time.time()
print("==>已经完成变量初始化耗时{:.2f}s,开始处理0值".format(time1 - time0))
while (len(dti) > max_bin) and (len(dti.query('(negative==0) or (positive==0)')) > 0):
dti["count"] = dti["negative"] + dti["positive"]
rm_bk = dti.query("(negative==0) or (positive==0)") \
.query("count == count.min()")
ind = rm_bk["variable"].index[0]
if ind == dti["variable"].index.max():
dti.loc[ind - 1, "variable"] = dti.loc[ind, "variable"]
else:
dti.loc[ind, "variable"] = dti.loc[ind + 1, "variable"]
dti = dti.groupby("variable")[["negative", "positive"]].sum().reset_index()
if self.print_process:
time2 = time.time()
print("==>已经完成0值处理耗时{:.2f}s,开始进行分箱迭代".format(time2 - time1))
return dti, var_name, is_numeric, mapping
def _normalize_output(self, dti, var_name, is_numeric, mapping):
"""
根据分箱后计算出来的结果,标准化输出
"""
break_points = dti['variable'].copy()
break_points[np.where(break_points == break_points.max())[0]] = np.inf
break_points = np.concatenate(([-np.inf], break_points))
if is_numeric:
cond = list(break_points)
else:
interval_index = pd.IntervalIndex.from_breaks(break_points, closed='right')
mapping["bin"] = mapping["variable"].map(lambda x: np.where(interval_index.contains(x))[0][0])
cond = pd.Series(mapping["bin"].values, index=mapping[var_name].values).to_dict()
return cond
# def unique_noNA(self, x: pd.Series):
# """
# pandas 中存在bug,许多场景的group 和 crosstab中的dropna参数的设定会失效,
# 在分箱的过程中剔除缺失值,若要考虑缺失值的场景,请提前对数据进行fillNa操作。
# """
# return np.array(list(filter(lambda ele: ele == ele, x.unique())))
def _x_check(self, dat, var_name):
"""
对自变量进行校验:校验是否存在缺失值
------------------------------
Return
若自变量中存在缺失值, 报错
"""
x_na_count = pd.isna(dat[var_name]).sum()
assert x_na_count != 0, f"自变量'{var_name}'中存在缺失值,自动分箱前请处理自变量中的缺失值"
def _y_check(self, y: pd.Series):
"""
校验y值是否符合以下两个条件:
1、y值必须是二分类变量
2、positive_label必须为y中的结果
------------------------------
Param
y:exog variable,pandas Series contains binary variable
------------------------------
"""
y_type = type_of_target(y)
# if y_type not in ['binary']:
# raise ValueError('目标变量必须是二元的!')
# if self.positive_label not in y:
# raise ValueError('请根据设定positive_label')
unique = y.unique()
assert y_type in ['binary'], "目标必须是二分类"
# assert not y.hasnans, "target中不能包含缺失值,请优先进行填充"
assert self.positive_label in unique, "请根据target正确设定positive_label"
assert self.negative_label in unique, "请根据target的结果正确设定negative_label"
def _check_target_type(self, y):
"""
判断y的类型,将y限定为 0和1 的数组。
"""
        warnings.warn("_check_target_type is deprecated", DeprecationWarning)
y_unique = y.unique()
if len(y_unique) != 2:
raise ValueError("y必须是二值型变量,且其元素必须是 0 1")
for item in y_unique:
if item not in [0, 1]:
raise ValueError("y必须是二值型变量,且其元素必须是 0 1")
@abstractmethod
def dsct(self, df, var_name, target, max_bin=12, precision=4):
"""
抽象接口,定义分箱方法的名称和入参,只能调用子类实现
Parameters
----------
dat: 数据集,格式必须为pd.DataFrame
var_name: 待分箱的因变量
target: 目标变量
precision : 数值型变量的分箱进度
"""
raise NotImplementedError("该方法只为定义基本的函数接口,不可直接调用,请使用子类实现的方法")
class ChiMerge(Discretize):
"""
卡方分箱法
"""
def dsct(self, df, var_name, target, max_bin=12, precision=4):
dti, var_name, is_numeric, mapping = self._init_data(df, var_name, target, max_bin, precision)
time0 = time.time()
dti["chi2"] = dti.apply(lambda row: self._calc_chi2(dti, row), axis=1)
while len(dti) > max_bin:
min_chi2_ind = dti.query("chi2 == chi2.min()").index[0]
if min_chi2_ind == dti.index.max():
# 更新正负样本数量,将前一行的数据与此行的数据相加
dti.loc[min_chi2_ind, "negative"] = dti.loc[min_chi2_ind, "negative"] + dti.loc[
min_chi2_ind - 1, "negative"]
dti.loc[min_chi2_ind, "positive"] = dti.loc[min_chi2_ind, "positive"] + dti.loc[
min_chi2_ind - 1, "positive"]
# 只需要更新当前行的chi2值
a = dti.loc[min_chi2_ind, "negative"]
b = dti.loc[min_chi2_ind, "positive"]
c = dti.loc[min_chi2_ind - 2, "negative"]
d = dti.loc[min_chi2_ind - 2, "positive"]
dti.loc[min_chi2_ind, "chi2"] = self._chi2(a, b, c, d)
# 删除前一行
dti = dti.drop(index=min_chi2_ind - 1)
dti.reset_index(drop=True, inplace=True)
elif min_chi2_ind == dti.index.min() + 1:
# 删除前一行前,需更新当前行的正负样本数量,以及当前行的chi2值
dti.loc[min_chi2_ind, "negative"] = dti.loc[min_chi2_ind - 1, "negative"] + dti.loc[
min_chi2_ind, "negative"]
dti.loc[min_chi2_ind, "positive"] = dti.loc[min_chi2_ind - 1, "positive"] + dti.loc[
min_chi2_ind, "positive"]
dti.loc[min_chi2_ind, "chi2"] = np.inf
# 更新后一行的chi2值
a = dti.loc[min_chi2_ind, "negative"]
b = dti.loc[min_chi2_ind, "positive"]
c = dti.loc[min_chi2_ind + 1, "negative"]
d = dti.loc[min_chi2_ind + 1, "positive"]
dti.loc[min_chi2_ind + 1, "chi2"] = self._chi2(a, b, c, d)
# 删除前一行
dti = dti.drop(index=min_chi2_ind - 1)
dti.reset_index(drop=True, inplace=True)
else:
# 删除前一行前,需更新当前行的正负样本数量,以及当前行的chi2值
dti.loc[min_chi2_ind, "negative"] = dti.loc[min_chi2_ind - 1, "negative"] + dti.loc[
min_chi2_ind, "negative"]
dti.loc[min_chi2_ind, "positive"] = dti.loc[min_chi2_ind - 1, "positive"] + dti.loc[
min_chi2_ind, "positive"]
a = dti.loc[min_chi2_ind, "negative"]
b = dti.loc[min_chi2_ind, "positive"]
c = dti.loc[min_chi2_ind - 2, "negative"]
d = dti.loc[min_chi2_ind - 2, "positive"]
dti.loc[min_chi2_ind, "chi2"] = self._chi2(a, b, c, d)
# 更新后一行的chi2值
c = dti.loc[min_chi2_ind + 1, "negative"]
d = dti.loc[min_chi2_ind + 1, "positive"]
dti.loc[min_chi2_ind + 1, "chi2"] = self._chi2(a, b, c, d)
# 删除前一行
dti = dti.drop(index=min_chi2_ind - 1)
dti.reset_index(drop=True, inplace=True)
# dti.loc[min_chi2_ind - 1, "variable"] = dti.loc[min_chi2_ind, "variable"]
# dti = dti.groupby("variable")[["negative", "positive"]].sum().reset_index()
if self.print_process:
time1 = time.time()
print("==>完成分箱迭代耗时{:.2f}s".format(time1 - time0))
return self._normalize_output(dti, var_name, is_numeric, mapping)
def _calc_chi2(self, dti, row):
ind0 = dti[dti['variable'] == row['variable']].index[0]
if ind0 == dti.index.min():
return np.inf
ind1 = ind0 - 1
a = dti.loc[ind1, 'negative']
b = dti.loc[ind1, 'positive']
c = dti.loc[ind0, 'negative']
d = dti.loc[ind0, 'positive']
return self._chi2(a, b, c, d)
def _chi2(self, a, b, c, d):
"""
如下横纵标对应的卡方计算公式为: K^2 = n (ad - bc) ^ 2 / [(a+b)(c+d)(a+c)(b+d)] 其中n=a+b+c+d为样本容量
y1 y2
x1 a b
x2 c d
:return: 卡方值
"""
a, b, c, d = float(a), float(b), float(c), float(d)
return ((a + b + c + d) * ((a * d - b * c) ** 2)) / ((a + b) * (c + d) * (b + d) * (a + c))
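        # Quick numeric check with made-up counts: a=10, b=20, c=30, d=5 gives
        # K^2 = 65 * (10*5 - 20*30)**2 / (30 * 35 * 25 * 40) ~= 18.7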
class ChiMergeV0(Discretize):
"""
卡方分箱法
"""
def dsct(self, df, var_name, target, max_bin=12, precision=4):
dti, var_name, is_numeric, mapping = self._init_data(df, var_name, target, max_bin, precision)
time0 = time.time()
while len(dti) > max_bin:
dti["chi2"] = dti.apply(lambda row: self._calc_chi2(dti, row), axis=1)
min_chi2_ind = dti.query("chi2 == chi2.min()").index[0]
dti.loc[min_chi2_ind - 1, "variable"] = dti.loc[min_chi2_ind, "variable"]
dti = dti.groupby("variable")[["negative", "positive"]].sum().reset_index()
if self.print_process:
time1 = time.time()
print("==>完成分箱迭代耗时{:.2f}s".format(time1 - time0))
return self._normalize_output(dti, var_name, is_numeric, mapping)
def _calc_chi2(self, dti, row):
ind0 = dti[dti['variable'] == row['variable']].index[0]
if ind0 == dti.index.min():
return np.inf
ind1 = ind0 - 1
a = dti.loc[ind1, 'negative']
b = dti.loc[ind1, 'positive']
c = dti.loc[ind0, 'negative']
d = dti.loc[ind0, 'positive']
return self._chi2(a, b, c, d)
def _chi2(self, a, b, c, d):
"""
如下横纵标对应的卡方计算公式为: K^2 = n (ad - bc) ^ 2 / [(a+b)(c+d)(a+c)(b+d)] 其中n=a+b+c+d为样本容量
y1 y2
x1 a b
x2 c d
:return: 卡方值
"""
a, b, c, d = float(a), float(b), float(c), float(d)
return ((a + b + c + d) * ((a * d - b * c) ** 2)) / ((a + b) * (c + d) * (b + d) * (a + c))
class BestKS(Discretize):
"""
Best-KS 分箱法
"""
def dsct(self, df, var_name, target, max_bin=12, precision=4):
dti, var_name, is_numeric, mapping = self._init_data(df, var_name, target, max_bin, precision)
time0 = time.time()
dti["count"] = dti["negative"] + dti["positive"]
dti["tmp"] = 0
uni_tmp = pd.unique(dti["tmp"])
while (len(uni_tmp) < max_bin) and (len(dti) > max_bin):
grouped_count = dti.groupby("tmp")[["count"]].sum()
max_len_tmp_v = grouped_count.query("count == count.max() ").index[0]
df_tmp = dti[dti["tmp"] == max_len_tmp_v].copy()
min_variable = df_tmp["variable"].min()
max_variable = df_tmp["variable"].max()
if min_variable == max_variable:
break
df_tmp["cum_n"] = df_tmp["negative"].cumsum()
df_tmp["cum_p"] = df_tmp["positive"].cumsum()
n_t = df_tmp["negative"].sum()
p_t = df_tmp["positive"].sum()
df_tmp["ks"] = np.abs((df_tmp["cum_n"] / n_t) - (df_tmp["cum_p"] / p_t))
besk_ks_variable = df_tmp.query(" ks == ks.max() ")["variable"].values[0]
if besk_ks_variable == max_variable:
dti.loc[dti["variable"] == max_variable, "tmp"] = (uni_tmp.max() + 1) # (dti["tmp"].max() + 1)
else:
dti.loc[(dti["variable"] >= min_variable) & (dti["variable"] <= besk_ks_variable), "tmp"] = \
(uni_tmp.max() + 1) # (dti["tmp"].max() + 1)
uni_tmp = pd.unique(dti["tmp"])
dti["variable"] = dti["tmp"].map(lambda x: dti[dti["tmp"] == x]["variable"].max())
dti = dti.groupby("variable")[["negative", "positive"]].sum().reset_index()
if self.print_process:
time1 = time.time()
print("==>完成分箱迭代耗时{:.2f}s".format(time1 - time0))
return self._normalize_output(dti, var_name, is_numeric, mapping)
class BestBin(Discretize):
"""
考虑排序性的分箱法,基于传入分箱工具类,控制单调性的分箱法
"""
def __init__(self, discretize, min_bin=3):
self.discretize = discretize
self.min_bin = min_bin
self.kit = Kit()
def check_posRate_monotone(self, dti):
"""
校验分箱单调性
"""
if dti.shape[0] <= 2:
return True
diff = dti["positive_rate"].diff()[1:]
if len(diff[diff >= 0]) == len(diff) or len(diff[diff <= 0]) == len(diff):
return True
else:
return False
def dsct(self, df, var_name, target, max_bin=12, precision=4):
"""
        考虑单调性的分箱法,对原有的分箱方法进行封装
"""
time0 = time.time()
if self.discretize.print_process: print(
"=========>>对变量'{}'启动'{}'的最优分箱".format(var_name, self.discretize.__class__.__name__))
cond = self.discretize.dsct(df, var_name, target, max_bin=max_bin)
var_name_new = var_name + "_bin"
df[var_name_new] = self.kit.make_bin(df, var_name, cond)
dti = self.kit.univerate(df, var_name_new, target)
while (not self.check_posRate_monotone(dti)) and len(dti) > self.min_bin:
max_bin = max_bin - 1
cond = self.discretize.dsct(df, var_name, target, max_bin)
df[var_name_new] = self.kit.make_bin(df, var_name, cond)
dti = self.kit.univerate(df, var_name_new, target)
time1 = time.time()
if self.discretize.print_process: print("=========>>最优分箱完成,耗时{:.2f}s".format(time1 - time0))
return cond
if __name__ == "__main__":
# rs = Discretization.unique_noNA(pd.Series(np.random.randint(0, 100, 10000)))
# print(rs)
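    # Sketch of the intended workflow (the csv path and the 'x'/'y' column names are
    # placeholders, not part of this module):
    #
    #   data = pd.read_csv("train.csv")
    #   kit = Kit()
    #   chi = ChiMerge(print_process=True)
    #   cond = chi.dsct(data, "x", "y", max_bin=6)
    #   data["x_bin"] = kit.make_bin(data, "x", cond)
    #   dti, woe_dict = kit.univerate(data, "x_bin", "y", retWoeDict=True)
    #   data["x_woe"] = kit.woe_code(data, "x_bin", woe_dict)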
| pd.set_option("display.max_columns", None) | pandas.set_option |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Processing workflow for the 1986 National Geographic Smell Survey data
import pandas as pd
import pyrfume
from pyrfume.odorants import from_cids, get_cids
df = pd.read_csv('NGS.csv', index_col=0).astype(int) # Load the data
df.index.name = 'Subject' # The index column is the subject number
data_dict = pd.read_excel('Data dictionary.xlsx', index_col=0) # Load the data dictionary
# Determine which integer value, if any, is used for no response (usually 0)
has_non_response_option = data_dict[data_dict['VALUES'].str.contains('No response') == True]
value_for_nan = has_non_response_option['VALUES'].apply(lambda x: x.split('=')[0]).astype(int)
# Replace the value for no response with Python `None`
df = df.apply(lambda col: col.replace(value_for_nan.get(col.name, None), None))
# +
# Odorant abbreviations used in the column names
odorant_abbreviations = {'AND': 'Androstenone',
'AA': 'Isoamyl acetate',
'AMY': 'Isoamyl acetate',
'GAL': 'Galaxolide',
'GALAX': 'Galaxolide',
'EUG': 'Eugenol',
'MER': 'Mercaptans',
'MERCAP': 'Mercaptans',
'ROSE': 'Rose'}
# Question abbreviations used in the column names (see data dictionary for full question)
question_abbreviations = {'SMELL': 'Smell',
'QUAL': 'Quality',
'INT': 'Intensity',
'MEM': 'Memorable',
'EAT': 'Edible',
'WEAR': 'Wearable',
'DES': 'Descriptor'}
# List of unique odorant names
odorant_names = list(set(odorant_abbreviations.values()))
# -
# All (meta)-data not concerning the odorants themselves, i.e. information about the subjects
metadata = df[[col for col in df if not any([col.startswith('%s_' % x) for x in odorant_abbreviations])]]
metadata.head()
# Save this subject data
metadata.to_csv('subjects.csv')
# +
# All data concerning the odorants themselves
data = df[[col for col in df if any([col.startswith('%s_' % x) for x in odorant_abbreviations])]]
def f(s):
"""Convert e.g. 'AA_QUAL' into ('Amyl Acetate', 'Quality')"""
odorant, question = s.split('_')
return odorant_abbreviations[odorant], question_abbreviations[question]
# Turn column header into a multiindex with odorants names and questions as separate levels
data.columns = pd.MultiIndex.from_tuples(data.columns.map(f).tolist(), names=('Odorant', 'Question'))
data.head()
# -
# From methods.txt
# PEA added due to common knowledge that
# this is primary ingredient of IFF rose
molecule_names = ['5a-androst-16-en-3-one',
'isoamyl acetate',
'Galaxolide',
'eugenol',
'tert-butyl mercaptan',
'isopropyl mercaptan',
'n-propyl mercaptan',
'sec-butyl mercaptan',
'phenyl ethyl alcohol']
# Get PubChem IDs for each odorant
names_to_cids = get_cids(molecule_names)
# Generate information about molecules
cids = list(names_to_cids.values())
molecules = pd.DataFrame(from_cids(cids)).set_index('CID').sort_index()
molecules.head()
names_to_cids
# Save this molecule data
molecules.to_csv('molecules.csv')
mixtures = pd.DataFrame(index=odorant_names, columns=[0]+cids)
# v/v * components ratios
mixtures.loc['Mercaptans'] = 0.04*pd.Series({6387: 0.76,
6364: 0.18,
7848: 0.04,
10560: 0.02})
mixtures.loc['Androstenone'] = 0.001*pd.Series({6852393: 1})
mixtures.loc['Isoamyl acetate'] = 1*pd.Series({31276: 1})
mixtures.loc['Eugenol'] = 1*pd.Series({3314: 1})
mixtures.loc['Galaxolide'] = 0.425* | pd.Series({91497: 1}) | pandas.Series |
#Author: <NAME>
#Created: July 13th 2018
import pandas as pd
import numpy as np
import nltk
import string
import csv
from sklearn.model_selection import train_test_split
from sklearn.ensemble import VotingClassifier
from mlxtend.classifier import StackingCVClassifier
#read in each of the feature csv files
class_labels = pd.read_csv('labels.csv',encoding='utf-8')
weighted_tfidf_score = pd.read_csv('tfidf_scores.csv',encoding='utf-8')
sentiment_scores = pd.read_csv('sentiment_scores.csv',encoding='utf-8')
dependency_features = pd.read_csv('dependency_features.csv',encoding='utf-8')
char_bigrams = | pd.read_csv('char_bigram_features.csv',encoding='utf-8') | pandas.read_csv |
#!/bin/env python3
"""create_csv_of_kp_predicate_triples.py
Creates a CSV of all predicate triples of the form (node type, edge type, node type) for KG1, KG2, and BTE (ARAX's current knowledge providers).
Resulting columns are: subject_type, edge_type, object_type
Usage: python create_csv_of_kp_predicate_triples.py
"""
# adapted from <NAME> code in create_csv_of_kp_node_pairs.py
import requests
import sys
import os
import csv
import time
import pandas as pd
from neo4j import GraphDatabase
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../") # code directory
from RTXConfiguration import RTXConfiguration
def run_neo4j_query(cypher, kg_name, data_type):
rtx_config = RTXConfiguration()
if kg_name != "KG1":
rtx_config.live = kg_name
driver = GraphDatabase.driver(rtx_config.neo4j_bolt, auth=(rtx_config.neo4j_username, rtx_config.neo4j_password))
with driver.session() as session:
start = time.time()
print(f"Grabbing {data_type} from {kg_name} neo4j...")
results = session.run(cypher).data()
print(f"...done. Query took {round((time.time() - start) / 60, 2)} minutes.")
driver.close()
return results
def get_kg1_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
    # Changed this from using n.category so that it can handle nodes with multiple labels.
    # Unfortunately that makes the cypher a little more unwieldy and likely slows the query a bit.
results = run_neo4j_query(cypher, "KG1", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
def get_kg2_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
    # Changed this from using n.category so that it can handle nodes with multiple labels.
    # Unfortunately this makes the cypher a little more unwieldy and likely slows the query a bit.
results = run_neo4j_query(cypher, "KG2", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
def get_kg2c_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
    # Changed this from using n.category so that it can handle nodes with multiple labels.
    # Unfortunately this makes the cypher a little more unwieldy and likely slows the query a bit.
results = run_neo4j_query(cypher, "KG2c", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
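# The three get_*_predicate_triples functions above differ only in the knowledge-graph
# name passed to run_neo4j_query; a shared helper could look like this sketch (not
# wired into the rest of the script):
#
#   def get_predicate_triples(kg_name):
#       cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, ' +\
#                'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as ' +\
#                'node1, rel as relationship, n2 as node2 where node1 <> "Base" and ' +\
#                'node2 <> "Base" return node1, relationship, node2'
#       results = run_neo4j_query(cypher, kg_name, "predicate triples")
#       triples_dict = {"subject": [], "predicate": [], "object": []}
#       for result in results:
#           triples_dict["subject"].append(result.get("node1"))
#           triples_dict["predicate"].append(result.get("relationship"))
#           triples_dict["object"].append(result.get("node2"))
#       return pd.DataFrame(triples_dict)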
def get_kg1_node_labels():
cypher = 'call db.labels()'
results = run_neo4j_query(cypher, "KG1", "node labels")
labels_dict = {"label":[]}
for result in results:
label = result.get('label')
labels_dict["label"].append(label)
return | pd.DataFrame(labels_dict) | pandas.DataFrame |
import sys
from collections import deque
import numpy as np
import pandas as pd
import os
from sqlalchemy import create_engine
import re
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from sklearn.multioutput import MultiOutputClassifier
from sklearn.linear_model import *
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import *
from sklearn.metrics import *
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.externals import joblib
def load_data(database_filepath):
engine = create_engine('sqlite:///{}'.format(database_filepath))
df = | pd.read_sql_table('P1Data', engine) | pandas.read_sql_table |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
from contextlib import contextmanager
from datetime import timedelta
from functools import partial
import pickle
import sys
from types import GetSetDescriptorType
from unittest import TestCase
import uuid
import warnings
from nose_parameterized import parameterized
from numpy import full, int32, int64
import pandas as pd
from pandas.util.testing import assert_frame_equal
from six import PY2, viewkeys
import sqlalchemy as sa
from zipline.assets import (
Asset,
Equity,
Future,
AssetDBWriter,
AssetFinder,
)
from zipline.assets.synthetic import (
make_commodity_future_info,
make_rotating_equity_info,
make_simple_equity_info,
)
from six import itervalues, integer_types
from toolz import valmap
from zipline.assets.asset_writer import (
check_version_info,
write_version_info,
_futures_defaults,
SQLITE_MAX_VARIABLE_NUMBER,
)
from zipline.assets.asset_db_schema import ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import (
downgrade
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
AssetDBVersionError,
SidsNotFound,
SymbolNotFound,
AssetDBImpossibleDowngrade,
ValueNotFoundForField,
)
from zipline.testing import (
all_subindices,
empty_assets_db,
parameter_space,
tmp_assets_db,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.fixtures import (
WithAssetFinder,
ZiplineTestCase,
WithTradingCalendars,
)
from zipline.utils.range import range
@contextmanager
def build_lookup_generic_cases(asset_finder_type):
"""
    Generate test cases for the type of asset finder specified by
    asset_finder_type for test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'duplicated',
'start_date': dupe_0_start.value,
'end_date': dupe_0_end.value,
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'duplicated',
'start_date': dupe_1_start.value,
'end_date': dupe_1_end.value,
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'unique',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'TEST',
},
],
index='sid'
)
fof14_sid = 10000
futures = pd.DataFrame.from_records(
[
{
'sid': fof14_sid,
'symbol': 'FOF14',
'root_symbol': 'FO',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'FUT',
},
],
index='sid'
)
root_symbols = pd.DataFrame({
'root_symbol': ['FO'],
'root_symbol_id': [1],
'exchange': ['CME'],
})
with tmp_assets_db(
equities=equities, futures=futures, root_symbols=root_symbols) \
as assets_db:
finder = asset_finder_type(assets_db)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
fof14 = finder.retrieve_asset(fof14_sid)
cf = finder.create_continuous_future(
root_symbol=fof14.root_symbol, offset=0, roll_style='volume',
)
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
yield (
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'DUPLICATED', dupe_0_start, dupe_0),
(finder, 'DUPLICATED', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'UNIQUE', unique_start, unique),
(finder, 'UNIQUE', None, unique),
# Futures
(finder, 'FOF14', None, fof14),
            # Future symbols should be unique, but including an as_of date
            # makes sure that code path is exercised.
(finder, 'FOF14', unique_start, fof14),
# Futures int
(finder, fof14_sid, None, fof14),
            # Future symbols should be unique, but including an as_of date
            # makes sure that code path is exercised.
(finder, fof14_sid, unique_start, fof14),
# ContinuousFuture
(finder, cf, None, cf),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('DUPLICATED', 'UNIQUE'), dupe_0_start, [dupe_0, unique]),
(finder, ('DUPLICATED', 'UNIQUE'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('DUPLICATED', 2, 'UNIQUE', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
# Futures and Equities
(finder, ['FOF14', 0], None, [fof14, assets[0]]),
# ContinuousFuture and Equity
(finder, [cf, 0], None, [cf, assets[0]]),
)
class AssetTestCase(TestCase):
# Dynamically list the Asset properties we want to test.
asset_attrs = [name for name, value in vars(Asset).items()
if isinstance(value, GetSetDescriptorType)]
# Very wow
asset = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
auto_close_date=pd.Timestamp('2014-06-26 11:21AM', tz='UTC'),
exchange='THE MOON',
)
asset3 = Asset(3, exchange="test")
asset4 = Asset(4, exchange="test")
asset5 = Asset(5, exchange="still testing")
def test_asset_object(self):
the_asset = Asset(5061, exchange="bar")
self.assertEquals({5061: 'foo'}[the_asset], 'foo')
self.assertEquals(the_asset, 5061)
self.assertEquals(5061, the_asset)
self.assertEquals(the_asset, the_asset)
self.assertEquals(int(the_asset), 5061)
self.assertEquals(str(the_asset), 'Asset(5061)')
def test_to_and_from_dict(self):
asset_from_dict = Asset.from_dict(self.asset.to_dict())
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_from_dict, attr),
)
def test_asset_is_pickleable(self):
asset_unpickled = pickle.loads(pickle.dumps(self.asset))
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_unpickled, attr),
)
def test_asset_comparisons(self):
s_23 = Asset(23, exchange="test")
s_24 = Asset(24, exchange="test")
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertEqual(int32(23), s_23)
self.assertEqual(int64(23), s_23)
self.assertEqual(s_23, int32(23))
self.assertEqual(s_23, int64(23))
# Check all int types (includes long on py2):
for int_type in integer_types:
self.assertEqual(int_type(23), s_23)
self.assertEqual(s_23, int_type(23))
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
# Compare to a value that doesn't fit into a platform int:
self.assertNotEqual(s_23, sys.maxsize + 1)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(self.asset3 < self.asset4)
self.assertFalse(self.asset4 < self.asset4)
self.assertFalse(self.asset5 < self.asset4)
def test_le(self):
self.assertTrue(self.asset3 <= self.asset4)
self.assertTrue(self.asset4 <= self.asset4)
self.assertFalse(self.asset5 <= self.asset4)
def test_eq(self):
self.assertFalse(self.asset3 == self.asset4)
self.assertTrue(self.asset4 == self.asset4)
self.assertFalse(self.asset5 == self.asset4)
def test_ge(self):
self.assertFalse(self.asset3 >= self.asset4)
self.assertTrue(self.asset4 >= self.asset4)
self.assertTrue(self.asset5 >= self.asset4)
def test_gt(self):
self.assertFalse(self.asset3 > self.asset4)
self.assertFalse(self.asset4 > self.asset4)
self.assertTrue(self.asset5 > self.asset4)
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(self.asset3 < 'a')
self.assertIsNotNone('a' < self.asset3)
else:
with self.assertRaises(TypeError):
self.asset3 < 'a'
with self.assertRaises(TypeError):
'a' < self.asset3
class TestFuture(WithAssetFinder, ZiplineTestCase):
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
2468: {
'symbol': 'OMH15',
'root_symbol': 'OM',
'notice_date': pd.Timestamp('2014-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2014-02-20', tz='UTC'),
'auto_close_date': pd.Timestamp('2014-01-18', tz='UTC'),
'tick_size': .01,
'multiplier': 500.0,
'exchange': "TEST",
},
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'multiplier': 1.0,
'exchange': 'TEST',
},
},
orient='index',
)
@classmethod
def init_class_fixtures(cls):
super(TestFuture, cls).init_class_fixtures()
cls.future = cls.asset_finder.lookup_future_symbol('OMH15')
cls.future2 = cls.asset_finder.lookup_future_symbol('CLG06')
def test_str(self):
strd = str(self.future)
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = repr(self.future)
self.assertIn("Future", reprd)
self.assertIn("2468", reprd)
self.assertIn("OMH15", reprd)
self.assertIn("root_symbol=%s'OM'" % ('u' if PY2 else ''), reprd)
self.assertIn(
"notice_date=Timestamp('2014-01-20 00:00:00+0000', tz='UTC')",
reprd,
)
self.assertIn(
"expiration_date=Timestamp('2014-02-20 00:00:00+0000'",
reprd,
)
self.assertIn(
"auto_close_date=Timestamp('2014-01-18 00:00:00+0000'",
reprd,
)
self.assertIn("tick_size=0.01", reprd)
self.assertIn("multiplier=500", reprd)
def test_reduce(self):
assert_equal(
pickle.loads(pickle.dumps(self.future)).to_dict(),
self.future.to_dict(),
)
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
for field in _futures_defaults.keys():
self.assertTrue(field in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
def test_lookup_future_symbol(self):
"""
Test the lookup_future_symbol method.
"""
om = TestFuture.asset_finder.lookup_future_symbol('OMH15')
self.assertEqual(om.sid, 2468)
self.assertEqual(om.symbol, 'OMH15')
self.assertEqual(om.root_symbol, 'OM')
self.assertEqual(om.notice_date, pd.Timestamp('2014-01-20', tz='UTC'))
self.assertEqual(om.expiration_date,
pd.Timestamp('2014-02-20', tz='UTC'))
self.assertEqual(om.auto_close_date,
pd.Timestamp('2014-01-18', tz='UTC'))
cl = TestFuture.asset_finder.lookup_future_symbol('CLG06')
self.assertEqual(cl.sid, 0)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('#&?!')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('FOOBAR')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('XXX99')
class AssetFinderTestCase(WithTradingCalendars, ZiplineTestCase):
asset_finder_type = AssetFinder
def write_assets(self, **kwargs):
self._asset_writer.write(**kwargs)
def init_instance_fixtures(self):
super(AssetFinderTestCase, self).init_instance_fixtures()
conn = self.enter_instance_context(empty_assets_db())
self._asset_writer = AssetDBWriter(conn)
self.asset_finder = self.asset_finder_type(conn)
def test_blocked_lookup_symbol_query(self):
# we will try to query for more variables than sqlite supports
# to make sure we are properly chunking on the client side
as_of = pd.Timestamp('2013-01-01', tz='UTC')
# we need more sids than we can query from sqlite
nsids = SQLITE_MAX_VARIABLE_NUMBER + 10
sids = range(nsids)
frame = pd.DataFrame.from_records(
[
{
'sid': sid,
'symbol': 'TEST.%d' % sid,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for sid in sids
]
)
self.write_assets(equities=frame)
assets = self.asset_finder.retrieve_equities(sids)
assert_equal(viewkeys(assets), set(sids))
def test_lookup_symbol_delimited(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'TEST.%d' % i,
'company_name': "company%d" % i,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for i in range(3)
]
)
self.write_assets(equities=frame)
finder = self.asset_finder
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
# we do it twice to catch caching bugs
for i in range(2):
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST', as_of)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST1', as_of)
# '@' is not a supported delimiter
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST@1', as_of)
# Adding an unnecessary fuzzy shouldn't matter.
for fuzzy_char in ['-', '/', '_', '.']:
self.assertEqual(
asset_1,
finder.lookup_symbol('TEST%s1' % fuzzy_char, as_of)
)
def test_lookup_symbol_fuzzy(self):
metadata = pd.DataFrame.from_records([
{'symbol': 'PRTY_HRD', 'exchange': "TEST"},
{'symbol': 'BRKA', 'exchange': "TEST"},
{'symbol': 'BRK_A', 'exchange': "TEST"},
])
self.write_assets(equities=metadata)
finder = self.asset_finder
dt = pd.Timestamp('2013-01-01', tz='UTC')
# Try combos of looking up PRTYHRD with and without a time or fuzzy matching
# Both non-fuzzy lookups get no result
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', None)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', dt)
# Both fuzzy lookups work
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', dt, fuzzy=True))
# Try combos of looking up PRTY_HRD, all returning sid 0
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt, fuzzy=True))
# Try combos of looking up BRKA, all returning sid 1
self.assertEqual(1, finder.lookup_symbol('BRKA', None))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt))
self.assertEqual(1, finder.lookup_symbol('BRKA', None, fuzzy=True))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt, fuzzy=True))
# Try combos of looking up BRK_A, all returning sid 2
self.assertEqual(2, finder.lookup_symbol('BRK_A', None))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt))
self.assertEqual(2, finder.lookup_symbol('BRK_A', None, fuzzy=True))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt, fuzzy=True))
def test_lookup_symbol_change_ticker(self):
T = partial(pd.Timestamp, tz='utc')
metadata = pd.DataFrame.from_records(
[
# sid 0
{
'symbol': 'A',
'asset_name': 'Asset A',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'B',
'asset_name': 'Asset B',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
# sid 1
{
'symbol': 'C',
'asset_name': 'Asset C',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'A', # claiming the unused symbol 'A'
'asset_name': 'Asset A',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
],
index=[0, 0, 1, 1],
)
self.write_assets(equities=metadata)
finder = self.asset_finder
# note: these assertions walk forward in time, starting at assertions
# about ownership before the start_date and ending with assertions
# after the end_date; new assertions should be inserted in the correct
# locations
# no one held 'A' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('A', T('2013-12-31'))
# no one held 'C' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('C', T('2013-12-31'))
for asof in pd.date_range('2014-01-01', '2014-01-05', tz='utc'):
# from 01 through 05 sid 0 held 'A'
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(0),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(A_result.symbol, 'B')
assert_equal(A_result.asset_name, 'Asset B')
# from 01 through 05 sid 1 held 'C'
C_result = finder.lookup_symbol('C', asof)
assert_equal(
C_result,
finder.retrieve_asset(1),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(C_result.symbol, 'A')
assert_equal(C_result.asset_name, 'Asset A')
# no one held 'B' before 06
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('B', T('2014-01-05'))
# no one held 'C' after 06, however, no one has claimed it yet
# so it still maps to sid 1
assert_equal(
finder.lookup_symbol('C', T('2014-01-07')),
finder.retrieve_asset(1),
)
for asof in pd.date_range('2014-01-06', '2014-01-11', tz='utc'):
# from 06 through 10 sid 0 held 'B'
# we test through the 11th because sid 0 is the last to hold 'B'
# so it should ffill
B_result = finder.lookup_symbol('B', asof)
assert_equal(
B_result,
finder.retrieve_asset(0),
msg=str(asof),
)
assert_equal(B_result.symbol, 'B')
assert_equal(B_result.asset_name, 'Asset B')
# from 06 through 10 sid 1 held 'A'
# we test through the 11th because sid 1 is the last to hold 'A'
# so it should ffill
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(1),
msg=str(asof),
)
assert_equal(A_result.symbol, 'A')
assert_equal(A_result.asset_name, 'Asset A')
def test_lookup_symbol(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'existing',
'start_date': date.value,
'end_date': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
self.write_assets(equities=df)
finder = self.asset_finder
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('NON_EXISTING', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol('EXISTING', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol('EXISTING', date)
self.assertEqual(result.symbol, 'EXISTING')
self.assertEqual(result.sid, i)
def test_fail_to_write_overlapping_data(self):
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later end date.
{
'sid': 2,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2013-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later start_date
{
'sid': 3,
'symbol': 'multiple',
'start_date': pd.Timestamp('2011-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
]
)
with self.assertRaises(ValueError) as e:
self.write_assets(equities=df)
self.assertEqual(
str(e.exception),
"Ambiguous ownership for 1 symbol, multiple assets held the"
" following symbols:\n"
"MULTIPLE:\n"
" intersections: (('2010-01-01 00:00:00', '2012-01-01 00:00:00'),"
" ('2011-01-01 00:00:00', '2012-01-01 00:00:00'))\n"
" start_date end_date\n"
" sid \n"
" 1 2010-01-01 2012-01-01\n"
" 2 2010-01-01 2013-01-01\n"
" 3 2011-01-01 2012-01-01"
)
def test_lookup_generic(self):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
with build_lookup_generic_cases(self.asset_finder_type) as cases:
for finder, symbols, reference_date, expected in cases:
results, missing = finder.lookup_generic(symbols,
reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_none_raises(self):
"""
If lookup_symbol is vectorized across multiple symbols and one of them is
None, we want to raise a TypeError.
"""
with self.assertRaises(TypeError):
self.asset_finder.lookup_symbol(None, pd.Timestamp('2013-01-01'))
def test_lookup_mult_are_one(self):
"""
Ensure that multiple symbols that return the same sid are collapsed to
a single returned asset.
"""
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': symbol,
'start_date': date.value,
'end_date': (date + timedelta(days=30)).value,
'exchange': 'NYSE',
}
for symbol in ('FOOB', 'FOO_B')
]
)
self.write_assets(equities=df)
finder = self.asset_finder
# If we are able to resolve this with any result, it means that we did not
# raise a MultipleSymbolsFound error.
result = finder.lookup_symbol('FOO/B', date + timedelta(1), fuzzy=True)
self.assertEqual(result.sid, 1)
def test_endless_multiple_resolves(self):
"""
Situation:
1. Asset 1 w/ symbol FOOB changes to FOO_B, and then is delisted.
2. Asset 2 is listed with symbol FOO_B.
If someone asks for FOO_B with fuzzy matching after 2 has been listed,
they should be able to correctly get 2.
"""
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'FOOB',
'start_date': date.value,
'end_date': date.max.value,
'exchange': 'NYSE',
},
{
'sid': 1,
'symbol': 'FOO_B',
'start_date': (date + timedelta(days=31)).value,
'end_date': (date + timedelta(days=60)).value,
'exchange': 'NYSE',
},
{
'sid': 2,
'symbol': 'FOO_B',
'start_date': (date + timedelta(days=61)).value,
'end_date': date.max.value,
'exchange': 'NYSE',
},
]
)
self.write_assets(equities=df)
finder = self.asset_finder
# If we are able to resolve this with any result, it means that we did not
# raise a MultipleSymbolsFound error.
result = finder.lookup_symbol(
'FOO/B',
date + timedelta(days=90),
fuzzy=True
)
self.assertEqual(result.sid, 2)
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'also_real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'symbol': 'real_but_old',
'start_date': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': 'TEST',
},
# Sid whose start_date is **after** our query date. We should
# **not** find it.
{
'sid': 3,
'symbol': 'real_but_in_the_future',
'start_date': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
self.write_assets(equities=data)
finder = self.asset_finder
results, missing = finder.lookup_generic(
['REAL', 1, 'FAKE', 'REAL_BUT_OLD', 'REAL_BUT_IN_THE_FUTURE'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'REAL')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'ALSO_REAL')
self.assertEqual(results[1].sid, 1)
self.assertEqual(results[2].symbol, 'REAL_BUT_OLD')
self.assertEqual(results[2].sid, 2)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'FAKE')
self.assertEqual(missing[1], 'REAL_BUT_IN_THE_FUTURE')
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end,
exchange="TEST")
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = self.asset_finder
asset1 = Equity(1, symbol="AAPL", exchange="TEST")
asset2 = Equity(2, symbol="GOOG", exchange="TEST")
asset200 = Future(200, symbol="CLK15", exchange="TEST")
asset201 = Future(201, symbol="CLM15", exchange="TEST")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
def test_compute_lifetimes(self):
num_assets = 4
trading_day = self.trading_calendar.day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_equity_info(
num_assets=num_assets,
first_start=first_start,
frequency=trading_day,
periods_between_starts=3,
asset_lifetime=5
)
self.write_assets(equities=frame)
finder = self.asset_finder
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_with_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
expected_no_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
# This way of doing the checks is redundant, but very
# clear.
if start <= date <= end:
expected_with_start_raw[i, j] = True
if start < date:
expected_no_start_raw[i, j] = True
expected_with_start = pd.DataFrame(
data=expected_with_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=True)
assert_frame_equal(result, expected_with_start)
expected_no_start = pd.DataFrame(
data=expected_no_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=False)
assert_frame_equal(result, expected_no_start)
def test_sids(self):
# Ensure that the sids property of the AssetFinder is functioning
self.write_assets(equities=make_simple_equity_info(
[0, 1, 2],
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
))
self.assertEqual({0, 1, 2}, set(self.asset_finder.sids))
def test_lookup_by_supplementary_field(self):
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'A',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'B',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'C',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
]
)
equity_supplementary_mappings = pd.DataFrame.from_records(
[
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2013-6-28', tz='UTC'),
},
{
'sid': 1,
'field': 'ALT_ID',
'value': '100000001',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000002',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 2,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
]
)
self.write_assets(
equities=equities,
equity_supplementary_mappings=equity_supplementary_mappings,
)
af = self.asset_finder
# Before sid 0 has changed ALT_ID.
dt = pd.Timestamp('2013-6-28', tz='UTC')
asset_0 = af.lookup_by_supplementary_field('ALT_ID', '100000000', dt)
self.assertEqual(asset_0.sid, 0)
asset_1 = af.lookup_by_supplementary_field('ALT_ID', '100000001', dt)
self.assertEqual(asset_1.sid, 1)
# We don't know about this ALT_ID yet.
with self.assertRaisesRegexp(
ValueNotFoundForField,
"Value '{}' was not found for field '{}'.".format(
'100000002',
'ALT_ID',
)
):
af.lookup_by_supplementary_field('ALT_ID', '100000002', dt)
# After all assets have ended.
dt = pd.Timestamp('2014-01-02', tz='UTC')
asset_2 = af.lookup_by_supplementary_field('ALT_ID', '100000000', dt)
self.assertEqual(asset_2.sid, 2)
asset_1 = af.lookup_by_supplementary_field('ALT_ID', '100000001', dt)
self.assertEqual(asset_1.sid, 1)
asset_0 = af.lookup_by_supplementary_field('ALT_ID', '100000002', dt)
self.assertEqual(asset_0.sid, 0)
# At this point both sids 0 and 2 have held this value, so an
# as_of_date is required.
expected_in_repr = (
"Multiple occurrences of the value '{}' found for field '{}'."
).format('100000000', 'ALT_ID')
with self.assertRaisesRegexp(
MultipleValuesFoundForField,
expected_in_repr,
):
af.lookup_by_supplementary_field('ALT_ID', '100000000', None)
def test_get_supplementary_field(self):
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'A',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'B',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'C',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
]
)
equity_supplementary_mappings = pd.DataFrame.from_records(
[
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2013-6-28', tz='UTC'),
},
{
'sid': 1,
'field': 'ALT_ID',
'value': '100000001',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000002',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 2,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
]
)
self.write_assets(
equities=equities,
equity_supplementary_mappings=equity_supplementary_mappings,
)
finder = self.asset_finder
# Before sid 0 has changed ALT_ID and sid 2 has started.
dt = pd.Timestamp('2013-6-28', tz='UTC')
for sid, expected in [(0, '100000000'), (1, '100000001')]:
self.assertEqual(
finder.get_supplementary_field(sid, 'ALT_ID', dt),
expected,
)
# Since sid 2 has not yet started, we don't know about its
# ALT_ID.
with self.assertRaisesRegexp(
NoValueForSid,
"No '{}' value found for sid '{}'.".format('ALT_ID', 2),
):
finder.get_supplementary_field(2, 'ALT_ID', dt),
# After all assets have ended.
dt = pd.Timestamp('2014-01-02', tz='UTC')
for sid, expected in [
(0, '100000002'), (1, '100000001'), (2, '100000000'),
]:
self.assertEqual(
finder.get_supplementary_field(sid, 'ALT_ID', dt),
expected,
)
# Sid 0 has historically held two values for ALT_ID by this dt.
with self.assertRaisesRegexp(
MultipleValuesFoundForSid,
"Multiple '{}' values found for sid '{}'.".format('ALT_ID', 0),
):
finder.get_supplementary_field(0, 'ALT_ID', None),
def test_group_by_type(self):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01'),
end_date=pd.Timestamp('2015-01-01'),
)
futures = make_commodity_future_info(
first_sid=6,
root_symbols=['CL'],
years=[2014],
)
# Intersecting sid queries, to exercise loading of partially-cached
# results.
queries = [
([0, 1, 3], [6, 7]),
([0, 2, 3], [7, 10]),
(list(equities.index), list(futures.index)),
]
self.write_assets(
equities=equities,
futures=futures,
)
finder = self.asset_finder
for equity_sids, future_sids in queries:
results = finder.group_by_type(equity_sids + future_sids)
self.assertEqual(
results,
{'equity': set(equity_sids), 'future': set(future_sids)},
)
@parameterized.expand([
(Equity, 'retrieve_equities', EquitiesNotFound),
(Future, 'retrieve_futures_contracts', FutureContractsNotFound),
])
def test_retrieve_specific_type(self, type_, lookup_name, failure_type):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01'),
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
from pathlib import Path
import re
from typing import Hashable, List, Tuple, Union
import zipfile
EMME_ENG_UNITS = {
'p': 1E-12,
'n': 1E-9,
'u': 1E-6,
'm': 0.001,
'k': 1000.0,
'M': 1E6,
'G': 1E9,
'T': 1E12
}
def process_emme_eng_notation_series(s: pd.Series, *, to_dtype=float) -> pd.Series: # TODO: create generic version...
"""A function to convert Pandas Series containing values in Emme's engineering notation"""
values = s.str.replace(r'\D+', '.', regex=True).astype(to_dtype)
units = s.str.replace(r'[\d,.]+', '', regex=True).map(EMME_ENG_UNITS).fillna(1.0)
return values * units
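# The helper below is illustrative only and is not part of the original module: it is a
# minimal sketch of how process_emme_eng_notation_series is expected to behave, assuming
# Emme-style strings in which the engineering-unit letter stands in for the decimal point
# (e.g. '1k5' means 1.5 * 1000) while plain numeric strings pass through unscaled.
def _demo_emme_eng_notation() -> pd.Series:
    s = pd.Series(['1k5', '250', '3M2'])
    # Expected output: 1500.0, 250.0, 3200000.0
    return process_emme_eng_notation_series(s)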
def read_nwp_base_network(nwp_fp: Union[str, Path]) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""A function to read the base network from a Network Package file (exported from Emme using the TMG Toolbox) into
DataFrames.
Args:
nwp_fp (Union[str, Path]): File path to the network package.
Returns:
Tuple[pd.DataFrame, pd.DataFrame]: A tuple of DataFrames containing the nodes and links
"""
nwp_fp = Path(nwp_fp)
if not nwp_fp.exists():
raise FileNotFoundError(f'File `{nwp_fp.as_posix()}` not found.')
header_nodes, header_links, last_line = None, None, None
with zipfile.ZipFile(nwp_fp) as zf:
for i, line in enumerate(zf.open('base.211'), start=1):
line = line.strip().decode('utf-8')
if line.startswith('c'):
continue # Skip comment lines
if line.startswith('t nodes'):
header_nodes = i
elif line.startswith('t links'):
header_links = i
last_line = i
# Read nodes
n_rows = header_links - header_nodes - 2
data_types = {
'c': str, 'Node': np.int64, 'X-coord': float, 'Y-coord': float, 'Data1': float, 'Data2': float,
'Data3': float, 'Label': str
}
nodes = pd.read_csv(zf.open('base.211'), index_col='Node', dtype=data_types, skiprows=header_nodes,
nrows=n_rows, delim_whitespace=True)
nodes.columns = nodes.columns.str.lower()
nodes.columns = nodes.columns.str.strip()
nodes.index.name = 'node'
nodes.rename(columns={'x-coord': 'x', 'y-coord': 'y'}, inplace=True)
nodes['is_centroid'] = nodes['c'] == 'a*'
nodes.drop('c', axis=1, inplace=True)
# Read links
n_rows = last_line - header_links - 1
links = pd.read_csv(zf.open('base.211'), index_col=['From', 'To'], skiprows=header_links, nrows=n_rows,
delim_whitespace=True, low_memory=False)
links.columns = links.columns.str.lower()
links.columns = links.columns.str.strip()
links.index.names = ['inode', 'jnode']
mask_mod = links['c'] == 'm'
n_modified_links = len(links[mask_mod])
if n_modified_links > 0:
print(f'Ignored {n_modified_links} modification records in the links table')
links = links[~mask_mod].drop('c', axis=1)
if 'typ' in links.columns:
links.rename(columns={'typ': 'type'}, inplace=True)
if 'lan' in links.columns:
links.rename(columns={'lan': 'lanes'}, inplace=True)
# Data type conversion
links = links.astype({'modes': str, 'type': int, 'lanes': int, 'vdf': int}) # simple type casting for non-float
for col in ['length', 'data1', 'data2', 'data3']:
if is_string_dtype(links[col]): # these columns are usually string if values use Emme engineering notation
links[col] = process_emme_eng_notation_series(links[col])
else:
links[col] = links[col].astype(float)
return nodes, links
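# Illustrative usage of the reader above (the file name is a placeholder, not part of the
# original module):
#   nodes, links = read_nwp_base_network('exports/base_network.nwp')
#   centroids = nodes[nodes['is_centroid']]
#   total_lane_length = (links['length'] * links['lanes']).sum()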
def read_nwp_exatts_list(nwp_fp: Union[str, Path], **kwargs) -> pd.DataFrame:
"""A function to read the extra attributes present in a Network Package file (exported from Emme using the TMG
Toolbox).
Args:
nwp_fp (Union[str, Path]): File path to the network package.
**kwargs: Any valid keyword arguments used by ``pandas.read_csv()``.
Returns:
pd.DataFrame
"""
nwp_fp = Path(nwp_fp)
if not nwp_fp.exists():
raise FileNotFoundError(f'File `{nwp_fp.as_posix()}` not found.')
kwargs['index_col'] = False
if 'quotechar' not in kwargs:
kwargs['quotechar'] = "'"
with zipfile.ZipFile(nwp_fp) as zf:
df = pd.read_csv(zf.open('exatts.241'), **kwargs)
df.columns = df.columns.str.strip()
df['type'] = df['type'].astype('category')
return df
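# Illustrative usage (placeholder path): list the extra attributes bundled in a network
# package and count how many of each attribute type it carries:
#   exatts = read_nwp_exatts_list('exports/base_network.nwp')
#   print(exatts['type'].value_counts())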
def _base_read_nwp_att_data(nwp_fp: Union[str, Path], att_type: str, index_col: Union[str, List[str]],
attributes: Union[str, List[str]] = None, **kwargs) -> pd.DataFrame:
nwp_fp = Path(nwp_fp)
if not nwp_fp.exists():
raise FileNotFoundError(f'File `{nwp_fp.as_posix()}` not found.')
if attributes is not None:
if isinstance(attributes, Hashable):
attributes = [attributes]
elif isinstance(attributes, list):
pass
else:
raise RuntimeError
if 'quotechar' not in kwargs:
kwargs['quotechar'] = "'"
with zipfile.ZipFile(nwp_fp) as zf:
df = pd.read_csv(zf.open(f'exatt_{att_type}.241'), **kwargs)
df.columns = df.columns.str.strip()
for col in df.columns:
if is_string_dtype(df[col]):
df[col] = df[col].str.strip()  # assumed handling: trim stray whitespace from string columns
"""Author: <NAME>
This contains the main Spomato class to be used to access the Spotify API and create new playlists based on the user's
defined criteria.
"""
import os
import pandas as pd
import spotipy
class Spomato():
"""Object used to access spotify API through spotipy and generate playlists.
This can take a combination user's saved tracks, playlists, and/or artist's songs to generate a playlist of a
specified length. This was conceived to use the Tomato Timer method as Spotify playlists.
This does require the user to provide a user API token from the spotify API. The API scopes used by this library are
playlist-read-private, playlist-modify-private, and user-library-read.
Parameters
----------
access_token : str
A valid Spotify Access token.
Attributes
----------
data : dictionary
Dictionary storing available data structures to create playlists.
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read
current_user_id : str
The string id of the user of the access token used to create the spotipy session.
"""
def __init__(self,
access_token=None):
"""Initialization function that sets access token and generates initial spotipy session.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
self.access_token = access_token
self.data = {}
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def update_token(self, access_token):
"""Updates the token and spotify session with the provided access_token. Generally used if your access token
has expired.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
# update the class access token and the spotipy session
self.access_token = access_token
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
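# Illustrative usage (the token strings below are placeholders, not real credentials):
#   spomato = Spomato(access_token='<spotify-access-token>')
#   ...
#   spomato.update_token('<refreshed-access-token>')  # e.g. after the old token expires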
def _get_spotipy_session(self):
"""Internal Function to create a new spotify session.
Returns
-------
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
"""
return spotipy.Spotify(auth=self.access_token)
@staticmethod
def _parse_album(album_data, market='US'):
"""Parses the album data returned from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
album_data : dict
A dictionary of album data from Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the album data and parse the track data
series_list = []
album_tracks = album_data['tracks']['items']
for record in album_tracks:
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
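# For reference, the album payload parsed above only needs the fields shown in this
# minimal (illustrative) structure; real Spotify album objects carry many more keys:
#   {'tracks': {'items': [
#       {'id': 'track-id', 'available_markets': ['US'], 'duration_ms': 180000},
#   ]}}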
@staticmethod
def _parse_user_playlist(data, market='US'):
"""Parses a user playlist data set from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
data : dictionary
Contains songs in a playlist from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the playlist data and parse the track data
series_list = []
data = data['tracks']['items']
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from cde.density_estimator import LSConditionalDensityEstimation, NeighborKernelDensityEstimation, KernelMixtureNetwork
from matplotlib.lines import Line2D
import pandas as pd
from cde.density_simulation import GaussianMixture, EconDensity
from cde.model_fitting.GoodnessOfFit import GoodnessOfFit
from cde.density_simulation.toy_densities import build_toy_dataset, build_toy_dataset2
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def generate_report():
econ_density = EconDensity()
X, Y = econ_density.simulate(n_samples=1000)
nke = NeighborKernelDensityEstimation()
nke.fit_by_cv(X, Y)
n_samples = 500
X_test = np.asarray([1 for _ in range(n_samples)])
Y_test = np.linspace(0, 8, num=n_samples)
Z = nke.pdf(X_test, Y_test)
def eval_econ_data():
gmm = GaussianMixture(ndim_x=1, ndim_y=1)
econ_density = EconDensity()
# print("ECON DATA --------------")
# print("KMN")
# for n_centers in [50, 100, 200]:
# kmn = KernelMixtureNetwork(n_centers=n_centers)
# gof = GoodnessOfFit(kmn, econ_density, n_observations=2000, print_fit_result=False, repeat_kolmogorov=1)
# gof_results = gof.compute_results()
# print("N_Centers:", n_centers)
# print(gof_results)
print("LAZY-Learner:")
nkde = KernelMixtureNetwork(n_training_epochs=10)
gof = GoodnessOfFit(nkde, gmm, n_observations=100, print_fit_result=False)
gof_results = gof.compute_results()
print(gof_results)
print(gof_results.report_dict())
#
# print("LSCDE")
# lscde = LSConditionalDensityEstimation()
# X, Y = econ_density.simulate(2000)
# nkde.fit_by_cv(X,Y)
# gof = GoodnessOfFit(lscde, econ_density, n_observations=2000, print_fit_result=False)
# gof_results = gof.compute_results()
# print(gof_results)
def plot_fitted_distribution():
n_observations = 1000 # number of data points
n_features = 3 # number of features
np.random.seed(22)
# X_train, X_test, Y_train, Y_test = econ_density.simulate(n_observations)  # unused here; synthetic data is generated below
model = KernelMixtureNetwork()
X_train = np.random.normal(loc=0, size=[n_observations, 1])
Y_train = 3 * X_train + np.random.normal(loc=0, size=[n_observations, 1])
X_test = np.random.normal(loc=0, size=[100, 1])
Y_test = 3 * X_test + np.random.normal(loc=0, size=[100, 1])
model.fit(X_train, Y_train)
print(model.score(X_test, Y_test))
#print(model.fit_by_cv(X_train, Y_train))
# plt.scatter(model.X_train, model.Y_test)
# plt.scatter(model.centr_x, model.centr_y, s=10*model.alpha)
# plt.show()
#
# fig, ax = plt.subplots()
# fig.set_size_inches(10, 8)
# sns.regplot(X_train, Y_train, fit_reg=False)
# plt.show()
#
#
n_samples = 1000
Y_plot = np.linspace(-10, 10, num=n_samples)
X_plot = np.expand_dims(np.asarray([-1 for _ in range(n_samples)]), axis=1)
result = model.pdf(X_plot, Y_plot)
plt.plot(Y_plot, result)
#plt.show()
#2d plot
X_plot = np.expand_dims(np.asarray([2 for _ in range(n_samples)]), axis=1)
result = model.pdf(X_plot, Y_plot)
plt.plot(Y_plot, result)
plt.show()
#3d plot
n_samples = 100
linspace_x = np.linspace(-15, 15, num=n_samples)
linspace_y = np.linspace(-15, 15, num=n_samples)
X, Y = np.meshgrid(linspace_x, linspace_y)
X, Y = X.flatten(), Y.flatten()
Z = model.pdf(X, Y)
X, Y, Z = X.reshape([n_samples, n_samples]), Y.reshape([n_samples, n_samples]), Z.reshape([n_samples, n_samples])
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=True)
plt.show()
def eval1():
n_observations = 2000 # number of data points
n_features = 1 # number of features
X_train, X_test, y_train, y_test = build_econ1_dataset(n_observations)
print("Size of features in training data: {}".format(X_train.shape))
print("Size of output in training data: {}".format(y_train.shape))
print("Size of features in test data: {}".format(X_test.shape))
print("Size of output in test data: {}".format(y_test.shape))
fig, ax = plt.subplots()
fig.set_size_inches(10, 8)
sns.regplot(X_train, y_train, fit_reg=False)
# plt.savefig('toydata.png')
# plt.show()
# plot.figure.size = 100
# plt.show()
kmn = KernelMixtureNetwork(train_scales=True, n_centers=20)
kmn.fit(X_train, y_train, n_epoch=300, eval_set=(X_test, y_test))
kmn.plot_loss()
# plt.savefig('trainplot.png')
samples = kmn.sample(X_test)
print(X_test.shape, samples.shape)
jp = sns.jointplot(X_test.ravel(), samples, kind="hex", stat_func=None, size=10)
jp.ax_joint.add_line(Line2D([X_test[0][0], X_test[0][0]], [-40, 40], linewidth=3))
jp.ax_joint.add_line(Line2D([X_test[1][0], X_test[1][0]], [-40, 40], color='g', linewidth=3))
jp.ax_joint.add_line(Line2D([X_test[2][0], X_test[2][0]], [-40, 40], color='r', linewidth=3))
plt.savefig('hexplot.png')
plt.show()
d = kmn.predict_density(X_test[0:3, :].reshape(-1, 1), resolution=1000)
df = pd.DataFrame(d)
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
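# The Numba-compiled call-sequence builder should agree with the pure-Python
# reference implementation for every CallSeqType exercised below (Default, Reversed, Random).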
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
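# Shared fixtures for the from_orders tests below: a single-column size series
# (np.inf/-np.inf request the maximum possible long/short size, np.nan skips the bar),
# a 3-column tiled variant, and a unit-size variant.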
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
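# Thin wrappers that run Portfolio.from_orders under each direction setting.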
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
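    # val_price=np.inf should be equivalent to valuing at the current close (forward-filled
    # unless ffill_val_price=False), and val_price=-np.inf to valuing at the previous close;
    # each case is checked against an explicitly passed valuation series.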
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
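    # reject_prob randomly (but seeded) rejects order requests: 0.0 keeps every order,
    # 0.5 drops some of them, and 1.0 leaves that column with no orders at all.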
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
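    # lock_cash=True is expected to cap order sizes so that (group) free cash never goes
    # negative from short sales; compare the free-cash outputs against the lock_cash=False runs.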
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
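    # log=True should emit one log record per processed order request (dtype log_dt),
    # including requests that were skipped (NaN size) or rejected.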
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
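    # call_seq defines the per-row processing order of columns within a cash-sharing group:
    # 'default', 'reversed', 'random' (seeded) and, further below, 'auto'.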
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
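    # update_value should only make a difference under cash sharing: with update_value=True the
    # group value is refreshed after each filled order, so later columns in the same row size
    # their target-percent orders against the updated value.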
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
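    # call_seq='auto' is expected to execute sell orders before buy orders within each row,
    # freeing up cash so that every column can hit its target value/percentage exactly.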
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
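    # Order and log records are preallocated: passing max_orders/max_logs smaller than the
    # number of generated records should raise.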
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
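# Shared fixtures for the from_signals tests below: overlapping entry/exit masks plus
# 3-column tiled variants, and direction-specific wrappers around Portfolio.from_signals.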
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
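# These variants exercise the five-argument signature with separate long and short
# entry/exit arrays instead of a single entries/exits pair plus a direction.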
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
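    # A user-supplied signal_func_nb should reproduce the array-based signals: here
    # nb.get_elem_nb presumably selects the broadcast element for the current row/column,
    # and the returned 4-tuple maps to (long entry, long exit, short entry, short exit).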
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
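    # Under direction='both', size_type='percent' raises with the default opposite-entry handling;
    # upon_opposite_entry='close' makes it work by closing first, presumably because a percentage
    # of current resources cannot describe a direct position reversal.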
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
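# Accumulation modes: 'disabled' ignores repeated signals, 'addonly' lets repeated
# entries scale in, 'removeonly' lets repeated exits scale out, and 'both' allows both.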
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
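# Conflict resolution: when an entry and an exit fire on the same bar, the
# upon_*_conflict and upon_opposite_entry options decide which signal wins.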
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
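# Capital and grouping: per-column initial cash, column grouping, and cash sharing
# within a group.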
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
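# call_seq controls the order in which columns of a cash-sharing group are processed
# per row: default, 'reversed', 'random', or 'auto', which rearranges calls so that
# sell orders release cash before buy orders.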
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
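# Stop order tests: sl_stop is a stop loss expressed as a fraction of the entry price,
# sl_trail=True makes it trail the best price since entry, and tp_stop is the
# take-profit counterpart. When open/high/low are provided, stops are detected
# within the bar rather than only on close.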
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1, sl_trail=True)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(tp_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
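# stop_entry_price selects which price ('val_price', 'price', 'fillprice' or 'close')
# the stop percentage is measured from; stop_exit_price selects the fill price of the exit.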
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
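# upon_stop_exit / upon_stop_update control how a triggered or repeated stop interacts
# with the current position and stop value (close, reduce, reverse, keep or override).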
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
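# adjust_sl_func_nb / adjust_tp_func_nb are Numba callbacks that can rewrite the current
# stop value on every bar; here they set the stop to 0 (immediate trigger) after `dur` bars.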
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return (0. if c.i - c.init_i >= dur else c.curr_stop), c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return (0. if c.i - c.curr_i >= dur else c.curr_stop), c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 10.0, 10.0, 0.0, 0), (1, 0, 4, 10.0, 10.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_tp_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
@njit
def adjust_tp_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=np.inf, adjust_tp_func_nb=adjust_tp_func_nb, adjust_tp_args=(2,)).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_max_orders(self):
_ = from_signals_both(close=price_wide)
_ = from_signals_both(close=price_wide, max_orders=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_both(close=price_wide, log=True)
_ = from_signals_both(close=price_wide, log=True, max_logs=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
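# from_holding should be equivalent to a from_signals portfolio that enters on the
# first bar and never exits.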
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
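# from_random_signals generates entries/exits either as a fixed number of signals (n)
# or with a per-bar probability (prob); the tests pin the seeded output against
# explicit signal arrays.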
class TestFromRandomSignals:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='randnx_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples(
[(0.25, 0.25), (0.5, 0.5)],
names=['rprobnx_entry_prob', 'rprobnx_exit_prob'])
)
# ############# from_order_func ############# #
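# Helper order functions for the from_order_func tests: buy `size` on even bars and
# sell it on odd bars. The flexible variants pick the target column from the call index
# within the current group and return (col, order).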
@njit
def order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size)
@njit
def log_order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
@njit
def flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size)
return -1, nb.order_nothing_nb()
@njit
def log_flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
return -1, nb.order_nothing_nb()
class TestFromOrderFunc:
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_one_column(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price.tolist(), order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price, order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
@pytest.mark.parametrize("test_use_numba", [False, True])
def test_multiple_columns(self, test_row_wise, test_flexible, test_use_numba):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, vbt.Rep('size'), broadcast_named_args=dict(size=[0, 1, np.inf]),
row_wise=test_row_wise, flexible=test_flexible, use_numba=test_use_numba)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 2, 0, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 2, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 1.0, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 1, 4, 1.0, 5.0, 0.0, 0), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 1, 1.0, 2.0, 0.0, 1),
(2, 1, 2, 1.0, 3.0, 0.0, 0), (3, 1, 3, 1.0, 4.0, 0.0, 1),
(4, 1, 4, 1.0, 5.0, 0.0, 0), (5, 2, 0, 100.0, 1.0, 0.0, 0),
(6, 2, 1, 200.0, 2.0, 0.0, 1), (7, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 3, 66.66666666666669, 4.0, 0.0, 1), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_group_by(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 0, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 2, 1, 200.0, 2.0, 0.0, 1),
(6, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(10, 1, 3, 66.66666666666669, 4.0, 0.0, 1), (11, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(12, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (13, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 0, 1, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (5, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 0, 3, 66.66666666666669, 4.0, 0.0, 1), (7, 1, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (9, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(10, 2, 0, 100.0, 1.0, 0.0, 0), (11, 2, 1, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_cash_sharing(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
@njit
def pre_segment_func_nb(c, target_hold_value):
order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
order_size_type = np.full(c.group_len, SizeType.TargetValue)
direction = np.full(c.group_len, Direction.Both)
order_value_out = np.empty(c.group_len, dtype=np.float_)
c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(c, order_size, order_size_type, direction):
col_i = c.call_seq_now[c.call_idx]
return nb.order_nb(
order_size[col_i],
c.close[c.i, col_i],
size_type=order_size_type[col_i],
direction=direction[col_i]
)
pf = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_value(self, test_row_wise, test_flexible):
@njit
def target_val_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_val_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(50., nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetValue)
return -1, nb.order_nothing_nb()
else:
@njit
def target_val_order_func_nb(c):
return nb.order_nb(50., nb.get_elem_nb(c, c.close), size_type=SizeType.TargetValue)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
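        # supply the previous close as the valuation price via a pre-segment function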
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
pre_segment_func_nb=target_val_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_percent(self, test_row_wise, test_flexible):
@njit
def target_pct_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_pct_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(0.5, nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetPercent)
return -1, nb.order_nothing_nb()
else:
@njit
def target_pct_order_func_nb(c):
return nb.order_nb(0.5, nb.get_elem_nb(c, c.close), size_type=SizeType.TargetPercent)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
pre_segment_func_nb=target_pct_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_update_value(self, test_row_wise, test_flexible):
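        # alternate between buying with np.inf and selling with -np.inf each bar, with fees and slippage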
if test_flexible:
@njit
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
@njit
def order_func_nb(c):
return nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
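        # record the portfolio value right before and right after each order to verify the effect of update_value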
@njit
def post_order_func_nb(c, value_before, value_now):
value_before[c.i, c.col] = c.value_before
value_now[c.i, c.col] = c.value_now
value_before = np.empty_like(price.values[:, None])
value_now = np.empty_like(price.values[:, None])
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=False,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
value_now
)
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=True,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
np.array([
[100.0],
[97.04930889128518],
[185.46988117104038],
[82.47853456223025],
[104.65775576218027]
])
)
np.testing.assert_array_equal(
value_now,
np.array([
[98.01980198019803],
[187.36243097890815],
[83.30331990785257],
[105.72569204546781],
[73.54075125567473]
])
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_states(self, test_row_wise, test_flexible):
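        # close prices contain NaNs so that ffill_val_price has an effect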
close = np.array([
[1, 1, 1],
[np.nan, 2, 2],
[3, np.nan, 3],
[4, 4, np.nan],
[5, 5, 5]
])
size = np.array([
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1]
])
value_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
value_arr2 = np.empty(size.shape, dtype=np.float_)
value_arr3 = np.empty(size.shape, dtype=np.float_)
return_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr2 = np.empty(size.shape, dtype=np.float_)
return_arr3 = np.empty(size.shape, dtype=np.float_)
pos_record_arr1 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr2 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr3 = np.empty(size.shape, dtype=trade_dt)
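        # record the running state at pre-segment, order, and post-order time;
        # plain Python callbacks require use_numba=False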
def pre_segment_func_nb(c):
value_arr1[c.i, c.group] = c.last_value[c.group]
return_arr1[c.i, c.group] = c.last_return[c.group]
for col in range(c.from_col, c.to_col):
pos_record_arr1[c.i, col] = c.last_pos_record[col]
if c.i > 0:
c.last_val_price[c.from_col:c.to_col] = c.last_val_price[c.from_col:c.to_col] + 0.5
return ()
if test_flexible:
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
value_arr2[c.i, col] = c.last_value[c.group]
return_arr2[c.i, col] = c.last_return[c.group]
pos_record_arr2[c.i, col] = c.last_pos_record[col]
return col, nb.order_nb(size[c.i, col], fixed_fees=1.)
return -1, nb.order_nothing_nb()
else:
def order_func_nb(c):
value_arr2[c.i, c.col] = c.value_now
return_arr2[c.i, c.col] = c.return_now
pos_record_arr2[c.i, c.col] = c.pos_record_now
return nb.order_nb(size[c.i, c.col], fixed_fees=1.)
def post_order_func_nb(c):
value_arr3[c.i, c.col] = c.value_now
return_arr3[c.i, c.col] = c.return_now
pos_record_arr3[c.i, c.col] = c.pos_record_now
_ = vbt.Portfolio.from_order_func(
close,
order_func_nb,
pre_segment_func_nb=pre_segment_func_nb,
post_order_func_nb=post_order_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
value_arr1,
np.array([
[100.0, 100.0],
[98.0, 99.0],
[98.5, 99.0],
[99.0, 98.0],
[99.0, 98.5]
])
)
np.testing.assert_array_equal(
value_arr2,
np.array([
[100.0, 99.0, 100.0],
[99.0, 99.0, 99.5],
[99.0, 99.0, 99.0],
[100.0, 100.0, 98.5],
[99.0, 98.5, 99.0]
])
)
np.testing.assert_array_equal(
value_arr3,
np.array([
[99.0, 98.0, 99.0],
[99.0, 98.5, 99.0],
[99.0, 99.0, 98.0],
[100.0, 99.0, 98.5],
[98.5, 97.0, 99.0]
])
)
np.testing.assert_array_equal(
return_arr1,
np.array([
[np.nan, np.nan],
[-0.02, -0.01],
[0.00510204081632653, 0.0],
[0.005076142131979695, -0.010101010101010102],
[0.0, 0.00510204081632653]
])
)
np.testing.assert_array_equal(
return_arr2,
np.array([
[0.0, -0.01, 0.0],
[-0.01, -0.01, -0.005],
[0.01020408163265306, 0.01020408163265306, 0.0],
[0.015228426395939087, 0.015228426395939087, -0.005050505050505051],
[0.0, -0.005050505050505051, 0.01020408163265306]
])
)
np.testing.assert_array_equal(
return_arr3,
np.array([
[-0.01, -0.02, -0.01],
[-0.01, -0.015, -0.01],
[0.01020408163265306, 0.01020408163265306, -0.010101010101010102],
[0.015228426395939087, 0.005076142131979695, -0.005050505050505051],
[-0.005050505050505051, -0.020202020202020204, 0.01020408163265306]
])
)
record_arrays_close(
pos_record_arr1.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr2.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 1.0, 0.25, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.5, 0.375, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.5, -0.375, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr3.flatten(),
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 3.0, 0, 3.0, 3.0, -1, 4.0, 1.0, 1.0, 0.1111111111111111, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, 4, 5.0, 1.0, -3.0, -0.75, 1, 1, 1),
(1, 2, 2.0, 2, 4.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
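        # second pass: record cash, position, valuation price, value, and return in post-segment,
        # plus in-simulation-order values in post-order, and compare them against the Portfolio accessors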
cash_arr = np.empty((size.shape[0], 2), dtype=np.float_)
position_arr = np.empty(size.shape, dtype=np.float_)
val_price_arr = np.empty(size.shape, dtype=np.float_)
value_arr = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr = np.empty((size.shape[0], 2), dtype=np.float_)
sim_order_cash_arr = np.empty(size.shape, dtype=np.float_)
sim_order_value_arr = np.empty(size.shape, dtype=np.float_)
sim_order_return_arr = np.empty(size.shape, dtype=np.float_)
def post_order_func_nb(c):
sim_order_cash_arr[c.i, c.col] = c.cash_now
sim_order_value_arr[c.i, c.col] = c.value_now
sim_order_return_arr[c.i, c.col] = c.value_now
if c.i == 0 and c.call_idx == 0:
sim_order_return_arr[c.i, c.col] -= c.init_cash[c.group]
sim_order_return_arr[c.i, c.col] /= c.init_cash[c.group]
else:
if c.call_idx == 0:
prev_i = c.i - 1
prev_col = c.to_col - 1
else:
prev_i = c.i
prev_col = c.from_col + c.call_idx - 1
sim_order_return_arr[c.i, c.col] -= sim_order_value_arr[prev_i, prev_col]
sim_order_return_arr[c.i, c.col] /= sim_order_value_arr[prev_i, prev_col]
def post_segment_func_nb(c):
cash_arr[c.i, c.group] = c.last_cash[c.group]
for col in range(c.from_col, c.to_col):
position_arr[c.i, col] = c.last_position[col]
val_price_arr[c.i, col] = c.last_val_price[col]
value_arr[c.i, c.group] = c.last_value[c.group]
return_arr[c.i, c.group] = c.last_return[c.group]
pf = vbt.Portfolio.from_order_func(
close,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_segment_func_nb=post_segment_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
cash_arr,
pf.cash().values
)
np.testing.assert_array_equal(
position_arr,
pf.assets().values
)
np.testing.assert_array_equal(
val_price_arr,
pf.get_filled_close().values
)
np.testing.assert_array_equal(
value_arr,
pf.value().values
)
np.testing.assert_array_equal(
return_arr,
pf.returns().values
)
if test_flexible:
with pytest.raises(Exception):
pf.cash(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.value(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.returns(in_sim_order=True, group_by=False)
else:
np.testing.assert_array_equal(
sim_order_cash_arr,
pf.cash(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_value_arr,
pf.value(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_return_arr,
pf.returns(in_sim_order=True, group_by=False).values
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_post_sim_ctx(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
1.,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
return -1, nb.order_nothing_nb()
else:
def order_func(c):
return nb.order_nb(
1.,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
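        # keep a deep copy of the final simulation context for inspection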
def post_sim_func(c, lst):
lst.append(deepcopy(c))
lst = []
_ = vbt.Portfolio.from_order_func(
price_wide,
order_func,
post_sim_func_nb=post_sim_func,
post_sim_args=(lst,),
row_wise=test_row_wise,
update_value=True,
max_logs=price_wide.shape[0] * price_wide.shape[1],
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
c = lst[-1]
assert c.target_shape == price_wide.shape
np.testing.assert_array_equal(
c.close,
price_wide.values
)
np.testing.assert_array_equal(
c.group_lens,
np.array([2, 1])
)
np.testing.assert_array_equal(
c.init_cash,
np.array([100., 100.])
)
assert c.cash_sharing
if test_flexible:
assert c.call_seq is None
else:
np.testing.assert_array_equal(
c.call_seq,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
np.testing.assert_array_equal(
c.segment_mask,
np.array([
[True, True],
[True, True],
[True, True],
[True, True],
[True, True]
])
)
assert c.ffill_val_price
assert c.update_value
if test_row_wise:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 2, 0, 1.0, 1.01, 1.0101, 0), (3, 0, 1, 1.0, 2.02, 1.0202, 0),
(4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 2, 1, 1.0, 2.02, 1.0202, 0),
(6, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (7, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 0, 3, 1.0, 4.04, 1.0404, 0),
(10, 1, 3, 1.0, 4.04, 1.0404, 0), (11, 2, 3, 1.0, 4.04, 1.0404, 0),
(12, 0, 4, 1.0, 5.05, 1.0505, 0), (13, 1, 4, 1.0, 5.05, 1.0505, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 0, 1, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
(4, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (5, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(6, 0, 3, 1.0, 4.04, 1.0404, 0), (7, 1, 3, 1.0, 4.04, 1.0404, 0),
(8, 0, 4, 1.0, 5.05, 1.0505, 0), (9, 1, 4, 1.0, 5.05, 1.0505, 0),
(10, 2, 0, 1.0, 1.01, 1.0101, 0), (11, 2, 1, 1.0, 2.02, 1.0202, 0),
(12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 2, 3, 1.0, 4.04, 1.0404, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
if test_row_wise:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
(3, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
(5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
(6, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 6),
(7, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
(8, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 8),
(9, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
1.0, 4.04, 1.0404, 0, 0, -1, 9),
(10, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
(11, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
(12, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
(13, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
else:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
(3, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 4),
(5, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
(6, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
(7, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
(8, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
(9, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
(10, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
(11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
(12, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 12),
(13, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
np.testing.assert_array_equal(
c.last_cash,
np.array([59.39700000000002, 79.69850000000001])
)
np.testing.assert_array_equal(
c.last_position,
np.array([5., 5., 5.])
)
np.testing.assert_array_equal(
c.last_val_price,
np.array([5.0, 5.0, 5.0])
)
np.testing.assert_array_equal(
c.last_value,
np.array([109.39700000000002, 104.69850000000001])
)
np.testing.assert_array_equal(
c.second_last_value,
np.array([103.59800000000001, 101.799])
)
np.testing.assert_array_equal(
c.last_return,
np.array([0.05597598409235705, 0.028482598060884715])
)
np.testing.assert_array_equal(
c.last_debt,
np.array([0., 0., 0.])
)
np.testing.assert_array_equal(
c.last_free_cash,
np.array([59.39700000000002, 79.69850000000001])
)
if test_row_wise:
np.testing.assert_array_equal(
c.last_oidx,
np.array([12, 13, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([12, 13, 14])
)
else:
np.testing.assert_array_equal(
c.last_oidx,
np.array([8, 9, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([8, 9, 14])
)
assert c.order_records[c.last_oidx[0]]['col'] == 0
assert c.order_records[c.last_oidx[1]]['col'] == 1
assert c.order_records[c.last_oidx[2]]['col'] == 2
assert c.log_records[c.last_lidx[0]]['col'] == 0
assert c.log_records[c.last_lidx[1]]['col'] == 1
assert c.log_records[c.last_lidx[2]]['col'] == 2
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_free_cash(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c, size):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
size[c.i, col],
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
def order_func(c, size):
return nb.order_nb(
size[c.i, c.col],
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
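        # record debt and free cash after each order; with cash sharing, free cash is tracked per group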
def post_order_func(c, debt, free_cash):
debt[c.i, c.col] = c.debt_now
if c.cash_sharing:
free_cash[c.i, c.group] = c.free_cash_now
else:
free_cash[c.i, c.col] = c.free_cash_now
size = np.array([
[5, -5, 5],
[5, -5, -10],
[-5, 5, 10],
[-5, 5, -10],
[-5, 5, 10]
])
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[93.8995, 94.0005, 93.8995],
[82.6985, 83.00150000000001, 92.70150000000001],
[96.39999999999999, 81.55000000000001, 80.8985],
[115.002, 74.998, 79.5025],
[89.0045, 48.49550000000001, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
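        # repeat the check on reversed prices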
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 24.75, 0.0],
[0.0, 44.55, 19.8],
[0.0, 22.275, 0.0],
[0.0, 0.0, 9.9],
[4.95, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[73.4975, 74.0025, 73.4975],
[52.0955, 53.00449999999999, 72.1015],
[65.797, 81.25299999999999, 80.0985],
[74.598, 114.60199999999998, 78.9005],
[68.5985, 108.50149999999998, 87.49949999999998]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
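        # repeat with grouping and cash sharing: free cash is recorded per group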
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[87.9, 93.8995],
[65.70000000000002, 92.70150000000001],
[77.95000000000002, 80.8985],
[90.00000000000001, 79.5025],
[37.500000000000014, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_init_cash(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=[1., 10., np.inf], flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 1.0, 0.0, 0),
(2, 2, 0, 10.0, 1.0, 0.0, 0), (3, 0, 1, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 2, 1, 10.0, 2.0, 0.0, 1),
(6, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 0, 3, 10.0, 4.0, 0.0, 1),
(10, 1, 3, 10.0, 4.0, 0.0, 1), (11, 2, 3, 10.0, 4.0, 0.0, 1),
(12, 0, 4, 8.0, 5.0, 0.0, 0), (13, 1, 4, 8.0, 5.0, 0.0, 0),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 2.0, 0.0, 1),
(2, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (3, 0, 3, 10.0, 4.0, 0.0, 1),
(4, 0, 4, 8.0, 5.0, 0.0, 0), (5, 1, 0, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 1, 3, 10.0, 4.0, 0.0, 1), (9, 1, 4, 8.0, 5.0, 0.0, 0),
(10, 2, 0, 10.0, 1.0, 0.0, 0), (11, 2, 1, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 2, 3, 10.0, 4.0, 0.0, 1),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(pf._init_cash) == np.ndarray
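        # InitCashMode.Auto and AutoAlign should produce the same orders as unlimited initial cash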
base_pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=np.inf, flexible=test_flexible)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.Auto, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.Auto
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.AutoAlign, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.AutoAlign
def test_func_calls(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 56
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [56]
assert list(pre_group_lst) == [2, 34]
assert list(post_group_lst) == [33, 55]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 35, 39, 43, 47, 51]
assert list(post_segment_lst) == [8, 14, 20, 26, 32, 38, 42, 46, 50, 54]
assert list(order_lst) == [4, 6, 10, 12, 16, 18, 22, 24, 28, 30, 36, 40, 44, 48, 52]
assert list(post_order_lst) == [5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 37, 41, 45, 49, 53]
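        # disable some segments; with call_pre_segment/call_post_segment=True the segment hooks
        # still run for inactive segments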
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 38
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [38]
assert list(pre_group_lst) == [2, 22]
assert list(post_group_lst) == [21, 37]
assert list(pre_segment_lst) == [3, 5, 7, 13, 19, 23, 25, 29, 31, 35]
assert list(post_segment_lst) == [4, 6, 12, 18, 20, 24, 28, 30, 34, 36]
assert list(order_lst) == [8, 10, 14, 16, 26, 32]
assert list(post_order_lst) == [9, 11, 15, 17, 27, 33]
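        # without forcing the segment hooks, they run only for active segments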
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 26
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [26]
assert list(pre_group_lst) == [2, 16]
assert list(post_group_lst) == [15, 25]
assert list(pre_segment_lst) == [3, 9, 17, 21]
assert list(post_segment_lst) == [8, 14, 20, 24]
assert list(order_lst) == [4, 6, 10, 12, 18, 22]
assert list(post_order_lst) == [5, 7, 11, 13, 19, 23]
def test_func_calls_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
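        # in flexible mode the order function picks the column itself and returns -1 to end the segment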
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 66
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [66]
assert list(pre_group_lst) == [2, 39]
assert list(post_group_lst) == [38, 65]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 40, 45, 50, 55, 60]
assert list(post_segment_lst) == [9, 16, 23, 30, 37, 44, 49, 54, 59, 64]
assert list(order_lst) == [
4, 6, 8, 11, 13, 15, 18, 20, 22, 25, 27, 29, 32, 34,
36, 41, 43, 46, 48, 51, 53, 56, 58, 61, 63
]
assert list(post_order_lst) == [5, 7, 12, 14, 19, 21, 26, 28, 33, 35, 42, 47, 52, 57, 62]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 42
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [42]
assert list(pre_group_lst) == [2, 24]
assert list(post_group_lst) == [23, 41]
assert list(pre_segment_lst) == [3, 5, 7, 14, 21, 25, 27, 32, 34, 39]
assert list(post_segment_lst) == [4, 6, 13, 20, 22, 26, 31, 33, 38, 40]
assert list(order_lst) == [8, 10, 12, 15, 17, 19, 28, 30, 35, 37]
assert list(post_order_lst) == [9, 11, 16, 18, 29, 36]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 30
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [30]
assert list(pre_group_lst) == [2, 18]
assert list(post_group_lst) == [17, 29]
assert list(pre_segment_lst) == [3, 10, 19, 24]
assert list(post_segment_lst) == [9, 16, 23, 28]
assert list(order_lst) == [4, 6, 8, 11, 13, 15, 20, 22, 25, 27]
assert list(post_order_lst) == [5, 7, 12, 14, 21, 26]
def test_func_calls_row_wise(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst):
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst):
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst):
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst):
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst):
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst):
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst):
call_i[0] += 1
post_order_lst.append(call_i[0])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 62
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [62]
assert list(pre_row_lst) == [2, 14, 26, 38, 50]
assert list(post_row_lst) == [13, 25, 37, 49, 61]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 33, 39, 45, 51, 57]
assert list(post_segment_lst) == [8, 12, 20, 24, 32, 36, 44, 48, 56, 60]
assert list(order_lst) == [4, 6, 10, 16, 18, 22, 28, 30, 34, 40, 42, 46, 52, 54, 58]
assert list(post_order_lst) == [5, 7, 11, 17, 19, 23, 29, 31, 35, 41, 43, 47, 53, 55, 59]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 44
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [44]
assert list(pre_row_lst) == [2, 8, 16, 26, 38]
assert list(post_row_lst) == [7, 15, 25, 37, 43]
assert list(pre_segment_lst) == [3, 5, 9, 11, 17, 23, 27, 33, 39, 41]
assert list(post_segment_lst) == [4, 6, 10, 14, 22, 24, 32, 36, 40, 42]
assert list(order_lst) == [12, 18, 20, 28, 30, 34]
assert list(post_order_lst) == [13, 19, 21, 29, 31, 35]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 32
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [32]
assert list(pre_row_lst) == [2, 4, 10, 18, 30]
assert list(post_row_lst) == [3, 9, 17, 29, 31]
assert list(pre_segment_lst) == [5, 11, 19, 25]
assert list(post_segment_lst) == [8, 16, 24, 28]
assert list(order_lst) == [6, 12, 14, 20, 22, 26]
assert list(post_order_lst) == [7, 13, 15, 21, 23, 27]
def test_func_calls_row_wise_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 72
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [72]
assert list(pre_row_lst) == [2, 16, 30, 44, 58]
assert list(post_row_lst) == [15, 29, 43, 57, 71]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 38, 45, 52, 59, 66]
assert list(post_segment_lst) == [9, 14, 23, 28, 37, 42, 51, 56, 65, 70]
assert list(order_lst) == [
4, 6, 8, 11, 13, 18, 20, 22, 25, 27, 32, 34, 36,
39, 41, 46, 48, 50, 53, 55, 60, 62, 64, 67, 69
]
assert list(post_order_lst) == [5, 7, 12, 19, 21, 26, 33, 35, 40, 47, 49, 54, 61, 63, 68]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 48
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [48]
assert list(pre_row_lst) == [2, 8, 17, 28, 42]
assert list(post_row_lst) == [7, 16, 27, 41, 47]
assert list(pre_segment_lst) == [3, 5, 9, 11, 18, 25, 29, 36, 43, 45]
assert list(post_segment_lst) == [4, 6, 10, 15, 24, 26, 35, 40, 44, 46]
assert list(order_lst) == [12, 14, 19, 21, 23, 30, 32, 34, 37, 39]
assert list(post_order_lst) == [13, 20, 22, 31, 33, 38]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 36
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [36]
assert list(pre_row_lst) == [2, 4, 11, 20, 34]
assert list(post_row_lst) == [3, 10, 19, 33, 35]
assert list(pre_segment_lst) == [5, 12, 21, 28]
assert list(post_segment_lst) == [9, 18, 27, 32]
assert list(order_lst) == [6, 8, 13, 15, 17, 22, 24, 26, 29, 31]
assert list(post_order_lst) == [7, 14, 16, 23, 25, 30]
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_orders(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=15, flexible=test_flexible)
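        # 15 orders are filled in total (5 rows x 3 columns), so max_orders=14 must raise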
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=14, flexible=test_flexible)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_logs(self, test_row_wise, test_flexible):
log_order_func = log_flex_order_func_nb if test_flexible else log_order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=14, flexible=test_flexible)
# ############# Portfolio ############# #
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'both']
group_by = pd.Index(['first', 'first', 'second'], name='group')
pf = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # independent
pf_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # grouped
pf_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D', attach_call_seq=True
) # shared
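# The three portfolios above run the same orders under different grouping setups:
# `pf` treats each column independently (group_by=None), `pf_grouped` groups 'a' and 'b'
# into 'first' and 'c' into 'second' without cash sharing, and `pf_shared` additionally
# shares cash within each group (hence the single initial cash of 200 for 'first').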
class TestPortfolio:
def test_config(self, tmp_path):
pf2 = pf.copy()
pf2._metrics = pf2._metrics.copy()
pf2.metrics['hello'] = 'world'
pf2._subplots = pf2.subplots.copy()
pf2.subplots['hello'] = 'world'
assert vbt.Portfolio.loads(pf2['a'].dumps()) == pf2['a']
assert vbt.Portfolio.loads(pf2.dumps()) == pf2
pf2.save(tmp_path / 'pf')
assert vbt.Portfolio.load(tmp_path / 'pf') == pf2
def test_wrapper(self):
pd.testing.assert_index_equal(
pf.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
price_na.columns
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.grouper.group_by is None
assert pf.wrapper.grouper.allow_enable
assert pf.wrapper.grouper.allow_disable
assert pf.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_grouped.wrapper.columns,
price_na.columns
)
assert pf_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_grouped.wrapper.grouper.group_by,
group_by
)
assert pf_grouped.wrapper.grouper.allow_enable
assert pf_grouped.wrapper.grouper.allow_disable
assert pf_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_shared.wrapper.columns,
price_na.columns
)
assert pf_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_shared.wrapper.grouper.group_by,
group_by
)
assert not pf_shared.wrapper.grouper.allow_enable
assert pf_shared.wrapper.grouper.allow_disable
assert not pf_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert pf['a'].wrapper == pf.wrapper['a']
assert pf['a'].orders == pf.orders['a']
assert pf['a'].logs == pf.logs['a']
assert pf['a'].init_cash == pf.init_cash['a']
pd.testing.assert_series_equal(pf['a'].call_seq, pf.call_seq['a'])
assert pf['c'].wrapper == pf.wrapper['c']
assert pf['c'].orders == pf.orders['c']
assert pf['c'].logs == pf.logs['c']
assert pf['c'].init_cash == pf.init_cash['c']
pd.testing.assert_series_equal(pf['c'].call_seq, pf.call_seq['c'])
assert pf[['c']].wrapper == pf.wrapper[['c']]
assert pf[['c']].orders == pf.orders[['c']]
assert pf[['c']].logs == pf.logs[['c']]
pd.testing.assert_series_equal(pf[['c']].init_cash, pf.init_cash[['c']])
pd.testing.assert_frame_equal(pf[['c']].call_seq, pf.call_seq[['c']])
assert pf_grouped['first'].wrapper == pf_grouped.wrapper['first']
assert pf_grouped['first'].orders == pf_grouped.orders['first']
assert pf_grouped['first'].logs == pf_grouped.logs['first']
assert pf_grouped['first'].init_cash == pf_grouped.init_cash['first']
pd.testing.assert_frame_equal(pf_grouped['first'].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped[['first']].wrapper == pf_grouped.wrapper[['first']]
assert pf_grouped[['first']].orders == pf_grouped.orders[['first']]
assert pf_grouped[['first']].logs == pf_grouped.logs[['first']]
pd.testing.assert_series_equal(
pf_grouped[['first']].init_cash,
pf_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(pf_grouped[['first']].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped['second'].wrapper == pf_grouped.wrapper['second']
assert pf_grouped['second'].orders == pf_grouped.orders['second']
assert pf_grouped['second'].logs == pf_grouped.logs['second']
assert pf_grouped['second'].init_cash == pf_grouped.init_cash['second']
pd.testing.assert_series_equal(pf_grouped['second'].call_seq, pf_grouped.call_seq['c'])
assert pf_grouped[['second']].wrapper == pf_grouped.wrapper[['second']]
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]
assert pf_grouped[['second']].logs == pf_grouped.logs[['second']]
pd.testing.assert_series_equal(
pf_grouped[['second']].init_cash,
pf_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(pf_grouped[['second']].call_seq, pf_grouped.call_seq[['c']])
assert pf_shared['first'].wrapper == pf_shared.wrapper['first']
assert pf_shared['first'].orders == pf_shared.orders['first']
assert pf_shared['first'].logs == pf_shared.logs['first']
assert pf_shared['first'].init_cash == pf_shared.init_cash['first']
pd.testing.assert_frame_equal(pf_shared['first'].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared[['first']].wrapper == pf_shared.wrapper[['first']]
assert pf_shared[['first']].orders == pf_shared.orders[['first']]
assert pf_shared[['first']].logs == pf_shared.logs[['first']]
pd.testing.assert_series_equal(
pf_shared[['first']].init_cash,
pf_shared.init_cash[['first']])
pd.testing.assert_frame_equal(pf_shared[['first']].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared['second'].wrapper == pf_shared.wrapper['second']
assert pf_shared['second'].orders == pf_shared.orders['second']
assert pf_shared['second'].logs == pf_shared.logs['second']
assert pf_shared['second'].init_cash == pf_shared.init_cash['second']
pd.testing.assert_series_equal(pf_shared['second'].call_seq, pf_shared.call_seq['c'])
assert pf_shared[['second']].wrapper == pf_shared.wrapper[['second']]
assert pf_shared[['second']].orders == pf_shared.orders[['second']]
assert pf_shared[['second']].logs == pf_shared.logs[['second']]
pd.testing.assert_series_equal(
pf_shared[['second']].init_cash,
pf_shared.init_cash[['second']])
pd.testing.assert_frame_equal(pf_shared[['second']].call_seq, pf_shared.call_seq[['c']])
def test_regroup(self):
assert pf.regroup(None) == pf
assert pf.regroup(False) == pf
assert pf.regroup(group_by) != pf
pd.testing.assert_index_equal(pf.regroup(group_by).wrapper.grouper.group_by, group_by)
assert pf_grouped.regroup(None) == pf_grouped
assert pf_grouped.regroup(False) != pf_grouped
assert pf_grouped.regroup(False).wrapper.grouper.group_by is None
assert pf_grouped.regroup(group_by) == pf_grouped
assert pf_shared.regroup(None) == pf_shared
with pytest.raises(Exception):
_ = pf_shared.regroup(False)
assert pf_shared.regroup(group_by) == pf_shared
def test_cash_sharing(self):
assert not pf.cash_sharing
assert not pf_grouped.cash_sharing
assert pf_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
pf.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_orders(self):
record_arrays_close(
pf.orders.values,
np.array([
(0, 0, 1, 0.1, 2.02, 0.10202, 0), (1, 0, 2, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 0, 4, 1.0, 5.05, 0.1505, 0), (3, 1, 0, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 1, 3, 0.1, 4.04, 0.10404000000000001, 0),
(6, 1, 4, 1.0, 4.95, 0.14950000000000002, 1), (7, 2, 0, 1.0, 1.01, 0.1101, 0),
(8, 2, 1, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 2, 3, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
pf.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0,
100.0, np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 0, 0, 1, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1,
0.0, 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 0, 0, 2, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
0.0, 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 0, 0, 3, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 0, 0, 4, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0,
0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 1, 1, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 1, 1, 2, 100.97612, -1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999,
-1.0, np.nan, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 1, 1, 3, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
-0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
0.10404000000000001, 0, 0, -1, 5),
(9, 1, 1, 4, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0, 5.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 105.26858, -2.0, 6.03,
93.20857999999998, 5.0, 95.46808, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 2, 2, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01, 0.1,
0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0, 0.0, 98.8799,
1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 2, 2, 1, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001, 1.1,
0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
-1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 2, 2, 3, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 2, 2, 4, 101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.logs.count(),
result
)
def test_entry_trades(self):
record_arrays_close(
pf.entry_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0, -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 1.0, 0, 0.99, 0.10990000000000001, 4, 4.954285714285714,
0.049542857142857145, -4.12372857142857, -4.165382395382394, 1, 0, 2),
(3, 1, 0.1, 1, 1.98, 0.10198, 4, 4.954285714285714, 0.004954285714285714,
-0.4043628571428571, -2.0422366522366517, 1, 0, 2),
(4, 1, 1.0, 4, 4.95, 0.14950000000000002, 4, 4.954285714285714,
0.049542857142857145, -0.20332857142857072, -0.04107647907647893, 1, 0, 2),
(5, 2, 1.0, 0, 1.01, 0.1101, 3, 3.0599999999999996, 0.21241818181818184,
1.727481818181818, 1.71037803780378, 0, 1, 3),
(6, 2, 0.1, 1, 2.02, 0.10202, 3, 3.0599999999999996, 0.021241818181818185,
-0.019261818181818203, -0.09535553555355546, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 3, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_entry_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_entry_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([5, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_entry_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.entry_trades.count(),
result
)
def test_exit_trades(self):
record_arrays_close(
pf.exit_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_exit_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_exit_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_exit_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.exit_trades.count(),
result
)
def test_positions(self):
record_arrays_close(
pf.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0, 2),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
pf.drawdowns.values,
np.array([
(0, 0, 0, 1, 4, 4, 100.0, 99.68951, 99.68951, 0),
(1, 1, 0, 1, 4, 4, 99.8801, 95.26858, 95.26858, 0),
(2, 2, 2, 3, 3, 4, 101.71618000000001, 101.70822000000001, 101.70822000000001, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(pf.close, price_na)
pd.testing.assert_frame_equal(pf_grouped.close, price_na)
pd.testing.assert_frame_equal(pf_shared.close, price_na)
def test_get_filled_close(self):
pd.testing.assert_frame_equal(
pf.get_filled_close(),
price_na.ffill().bfill()
)
def test_asset_flow(self):
pd.testing.assert_frame_equal(
pf.asset_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_flow(),
result
)
def test_assets(self):
pd.testing.assert_frame_equal(
pf.assets(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.assets(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.assets(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.assets(),
result
)
pd.testing.assert_frame_equal(
pf_shared.assets(),
result
)
def test_position_mask(self):
pd.testing.assert_frame_equal(
pf.position_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.position_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.position_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(),
result
)
def test_position_coverage(self):
pd.testing.assert_series_equal(
pf.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
pf.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
pf.cash_flow(free=True),
pd.DataFrame(
np.array([
[0.0, -1.0998999999999999, -1.1201],
[-0.30402, -0.2999800000000002, -0.3040200000000002],
[0.19402999999999998, 0.0, 2.8402999999999996],
[0.0, -0.2920400000000002, 0.29204000000000035],
[-5.2005, -5.0995, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
pf.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
pf_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
pf.cash(free=True),
pd.DataFrame(
np.array([
[100.0, 98.9001, 98.8799],
[99.69598, 98.60011999999999, 98.57588000000001],
[99.89001, 98.60011999999999, 101.41618000000001],
[99.89001, 98.30807999999999, 101.70822000000001],
[94.68951, 93.20857999999998, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(),
result
)
def test_asset_value(self):
pd.testing.assert_frame_equal(
pf.asset_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., 2.2, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., -2.2, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[-2.2, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0.0, 0.01000999998999, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.03909759620159034, 0.0],
[0.0, 0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0.0, -0.010214494162927312, 0.010012024441354066],
[0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
[0.0, -0.022821548354919067, 0.002949383274126105],
[0.0, -0.04241418126633477, 0.0],
[0.050155728521486365, -0.12017991413866216, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.00505305454620791, 0.010012024441354066],
[0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
[0.0, -0.011201622483733716, 0.002949383274126105],
[0.0, -0.020585865497718882, 0.0],
[0.025038871596209537, -0.0545825965137659, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00505305454620791, 0.010012024441354066],
[-0.010188689433972452, 0.021830620581035857],
[-0.0112078992458765, 0.002949383274126105],
[-0.02059752492931316, 0.0],
[-0.027337628293439265, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0.0, -0.01000999998999, 0.010012024441354066],
[0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
[0.0, -0.021825370842812494, 0.002949383274126105],
[0.0, -0.03909759620159034, 0.0],
[0.050155728521486365, -0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.005002498748124688, 0.010012024441354066],
[0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
[0.0, -0.010956168751293576, 0.002949383274126105],
[0.0, -0.019771825228137207, 0.0],
[0.025038871596209537, -0.049210520540028384, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005002498748124688, 0.010012024441354066],
[-0.009965205542937988, 0.021830620581035857],
[-0.010962173376438594, 0.002949383274126105],
[-0.019782580537729116, 0.0],
[-0.0246106361476199, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, 98.77612, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, 198.77612, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[198.66613, 198.6721, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[198.66613, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[-3.0049513746473233e-05, 0.0, 9.33060570e-03],
[0.0, -0.011617682390048093, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[-3.0049513746473233e-05, 9.33060570e-03],
[-0.011617682390048093, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(),
result
)
def test_asset_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, 0.0, 0.42740909],
[0., -1.0491090909090908, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[-0.0029850000000000154, 0.42740909],
[-1.0491090909090908, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(),
result
)
def test_benchmark_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(),
result
)
def test_benchmark_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(),
result
)
def test_total_benchmark_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-59.62258787402645, -23.91718815937344]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-13.30950646054953, -19.278625117344564, 12.345065267401496]),
index=price_na.columns
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.information_ratio(group_by=False),
pd.Series(
np.array([-0.9988561334618041, -0.8809478746008806, -0.884780642352239]),
index=price_na.columns
).rename('information_ratio')
)
with pytest.raises(Exception):
_ = pf_shared.information_ratio(pf_shared.benchmark_returns(group_by=False) * 2)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]', 'Worst Trade [%]',
'Avg Winning Trade [%]', 'Avg Losing Trade [%]',
'Avg Winning Trade Duration', 'Avg Losing Trade Duration',
'Profit Factor', 'Expectancy', 'Sharpe Ratio', 'Calmar Ratio',
'Omega Ratio', 'Sortino Ratio'
], dtype='object')
pd.testing.assert_series_equal(
pf.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 98.88877000000001, -1.11123, 283.3333333333333,
2.05906183131983, 0.42223000000000005, 1.6451238489727062, pd.Timedelta('3 days 08:00:00'),
2.0, 1.3333333333333333, 0.6666666666666666, -1.5042060606060605, 33.333333333333336,
-98.38058805880588, -100.8038553855386, 143.91625412541256, -221.34645964596464,
pd.Timedelta('2 days 12:00:00'), pd.Timedelta('2 days 00:00:00'), np.inf, 0.10827272727272726,
-6.751008013903537, 10378.930331014584, 4.768700318817701, 31.599760994679134
]),
index=stats_index,
name='agg_func_mean')
)
pd.testing.assert_series_equal(
pf.stats(column='a'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(freq='10 days', year_freq='200 days')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('50 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('40 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('10 days 00:00:00'), 0.0, -0.10999000000000003,
-3.1151776875290866, -3.981409131683691, 0.0, -2.7478603669149457
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(trade_type='positions')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]',
'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy',
'Sharpe Ratio', 'Calmar Ratio', 'Omega Ratio', 'Sortino Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(required_return=0.1, risk_free=0.01)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -227.45862849586334, -65.40868619923044, 0.0, -19.104372472268942
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(use_asset_returns=True)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997,
150.0, 5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966, np.nan,
-54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0, -0.10999000000000003,
np.nan, np.nan, 0.0, np.nan
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(incl_open=True)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -3.9702970297029667, -54.450495049504966,
np.nan, -29.210396039603964, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.1552449999999999, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf_grouped.stats(column='first'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 200.0, 194.95809, -2.520955, 275.0, -0.505305454620791,
0.82091, 2.46248125751388, pd.Timedelta('4 days 00:00:00'), 4, 2, 2, -4.512618181818182,
0.0, -54.450495049504966, -388.2424242424243, np.nan, -221.34645964596461, pd.NaT,
pd.Timedelta('2 days 00:00:00'), 0.0, -0.2646459090909091, -20.095906945591288,
-34.312217430388344, 0.0, -14.554511690523578
]),
index=stats_index,
name='first')
)
pd.testing.assert_series_equal(
pf.stats(column='a', tags='trades and open and not closed', settings=dict(incl_open=True)),
pd.Series(
np.array([
1, -0.20049999999999982
]),
index=pd.Index([
'Total Open Trades', 'Open Trade PnL'
], dtype='object'),
name='a')
)
max_winning_streak = (
'max_winning_streak',
dict(
title='Max Winning Streak',
calc_func=lambda trades: trades.winning_streak.max(),
resolve_trades=True
)
)
pd.testing.assert_series_equal(
pf.stats(column='a', metrics=max_winning_streak),
pd.Series([0.0], index=['Max Winning Streak'], name='a')
)
max_winning_streak = (
'max_winning_streak',
dict(
title='Max Winning Streak',
calc_func=lambda self, group_by: self.get_trades(group_by=group_by).winning_streak.max()
)
)
pd.testing.assert_series_equal(
pf.stats(column='a', metrics=max_winning_streak),
            pd.Series([0.0], index=['Max Winning Streak'], name='a')
        )
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
    Decorator to ignore FutureWarning if we have a SparsePanel.
    Can be removed once SparsePanel is fully removed.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
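# The classes below are mixin-style test collections: each assumes that the concrete
# test case mixing them in has set up `self.panel` before these methods run.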
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
    def test_not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
        self.assertTrue(tm.equalContents(list(self.panel), self.panel.items))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
        # Test panel.iteritems(), aka panel.items()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
        assert_panel_equal(p, p_orig)
from typing import List
import numpy as np
import pandas as pd # type: ignore
import copy
import pdb
from sklearn.model_selection import TimeSeriesSplit # type: ignore
import dask
import dask.dataframe as dd
##### This function loads a time series data and sets the index as a time series
def load_ts_data(filename, ts_column, sep, target, dask_xgboost_flag=0):
"""
This function loads a given filename into a pandas dataframe and sets the
ts_column as a Time Series index. Note that filename should contain the full
path to the file.
Inputs:
filename: name of file that contains data
ts_column: name of time series column in data
sep: separator used as a column separator in datafile
target: name of the target column to predict
dask_xgboost_flag: flag that will tell whether to load into dask or pandas dataframe.
If dask_xgboost_flag is set to True it returns both a dask as well as pandas DataFrame.
If dask_xgboost_flag is set to False it returns both of them as pandas DataFrames.
Outputs:
dft: dask DataFrame
filename: pandas DataFrame
"""
if isinstance(filename, str):
filename = pd.read_csv(filename, sep=sep, parse_dates=[ts_column])
### If filename is not a string, it must be a dataframe and can be loaded
if dask_xgboost_flag:
if type(filename) == dask.dataframe.core.DataFrame:
print(' Since dask_xgboost_flag is True, and input is dask, continuing...')
else:
filename = copy.deepcopy(filename)
print(' Since dask_xgboost_flag is True and input is pandas, reducing memory size of df and loading into dask')
filename = reduce_mem_usage(filename)
dft = dd.from_pandas(filename, npartitions=1)
print(' Converted pandas dataframe into a Dask dataframe ...' )
else:
dft = copy.deepcopy(filename)
print(' Using given input: pandas dataframe...')
################## L O A D T E S T D A T A ######################
dft = remove_duplicate_cols_in_dataset(dft)
####### Make sure you change it to a date-time index #####
if dask_xgboost_flag:
### if dask exists, you need to change its datetime index also ##
dft, _ = change_to_datetime_index(dft, ts_column)
### you have to change the pandas df also to datetime index ###
filename, str_format = change_to_datetime_index(filename, ts_column)
#preds = [x for x in list(dft) if x not in [target]]
#dft = dft[[target]+preds]
return dft, filename, str_format
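# Illustrative usage sketch (added; the file name and column names 'store_sales.csv', 'ds', 'y'
# below are hypothetical, not from the original project):
# dask_df, pandas_df, fmt = load_ts_data('store_sales.csv', ts_column='ds', sep=',',
#                                        target='y', dask_xgboost_flag=0)
# pandas_df.index   # -> DatetimeIndex built from the 'ds' column
# fmt               # -> inferred strftime format string ('' when nothing had to be parsed)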
####################################################################################################################
def load_test_data(filename, ts_column, sep, target, dask_xgboost_flag=0):
"""
This function loads a given filename into a pandas dataframe and sets the
ts_column as a Time Series index. Note that filename should contain the full
path to the file.
"""
if isinstance(filename, str):
        filename = pd.read_csv(filename, sep=sep, index_col=ts_column, parse_dates=True)
        dft = filename
### If filename is not a string, it must be a dataframe and can be loaded
else:
if type(filename) == dask.dataframe.core.DataFrame:
print(' Since dask_xgboost_flag is True, and input is dask, continuing...')
ddf = filename.compute()
print(' Converted dask dataframe into a pandas dataframe ...' )
print(' Reducing memory size of df and loading into dask')
dft = reduce_mem_usage(ddf)
else:
dft = copy.deepcopy(filename)
print(' Using given input: pandas dataframe...')
################## L O A D T E S T D A T A ######################
dft = remove_duplicate_cols_in_dataset(dft)
return dft
####################################################################################################################
def remove_duplicate_cols_in_dataset(df):
df = copy.deepcopy(df)
cols = df.columns.tolist()
number_duplicates = df.columns.duplicated().astype(int).sum()
if number_duplicates > 0:
print('Detected %d duplicate columns in dataset. Removing duplicates...' %number_duplicates)
df = df.loc[:,~df.columns.duplicated()]
return df
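# Small behaviour sketch (added; the frame is hypothetical): duplicate column names keep only
# their first occurrence.
# dup = pd.concat([pd.DataFrame({'a': [1]}), pd.DataFrame({'a': [2]})], axis=1)
# remove_duplicate_cols_in_dataset(dup).columns.tolist()   # -> ['a']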
###########################################################################
def change_to_datetime_index(dft, ts_column):
dft = copy.deepcopy(dft)
if isinstance(dft, pd.Series) or isinstance(dft, pd.DataFrame):
try:
### If ts_column is not a string column, then set its format to an empty string ##
str_format = ''
############### Check if it has an index or a column with the name of train time series column ####
if ts_column in dft.columns:
print(' %s column exists in given train data...' %ts_column)
str_first_value = dft[ts_column].values[0]
str_values = dft[ts_column].values[:12] ### we want to test a big sample of them
if type(str_first_value) == str:
### if it is an object column, convert ts_column into datetime and then set as index
str_format = infer_date_time_format(str_values)
if str_format:
str_format = str_format[0]
ts_index = pd.to_datetime(dft.pop(ts_column), format=str_format)
else:
ts_index = pd.to_datetime(dft.pop(ts_column))
dft.index = ts_index
elif type(str_first_value) == pd.Timestamp or type(str_first_value) == np.datetime64:
### if it is a datetime column, then set it as index
### if it a datetime index, then just set the index as is
ts_index = dft.pop(ts_column)
dft.index = ts_index
elif type(str_first_value) in [np.int8, np.int16, np.int32, np.int64]:
### if it is an integer column, convert ts_column into datetime and then set as index
ts_index = pd.to_datetime(dft.pop(ts_column))
dft.index = ts_index
else:
print(' Type of time series column %s is float or unknown. Must be string or datetime. Please check input and try again.' %ts_column)
return
elif ts_column in dft.index.name:
print(' train time series %s column is the index on test data...' %ts_column)
ts_index = dft.index
str_first_value = ts_index[0]
str_values = ts_index[:12]
if type(str_first_value) == str:
### if index is in string format, you must infer its datetime string format and then set datetime index
str_format = infer_date_time_format(str_values)
if str_format:
str_format = str_format[0]
ts_index = pd.to_datetime(ts_index, format=str_format)
else:
ts_index = pd.to_datetime(ts_index)
dft.index = ts_index
elif type(ts_index) == pd.core.indexes.datetimes.DatetimeIndex:
### if dft already has a datetime index, leave it as it is
pass
elif type(ts_index) == pd.DatetimeIndex or dft.index.dtype == '<M8[ns]':
### if dft already has a datatime index, leave it as is
pass
elif type(str_first_value) in [np.int8, np.int16, np.int32, np.int64]:
### if it is not a datetime index, then convert it to datetime and set the index
ts_index = pd.to_datetime(ts_index)
dft.index = ts_index
else:
print(' Type of index is unknown or float. It must be datetime or string. Please check input and try again.')
return
else:
print(f" (Error) Cannot find '{ts_column}' (or index) in given data.")
return None
except:
            print('    Error converting time series column %s into a datetime index. Please check your input and try again.' %ts_column)
return
elif type(dft) == dask.dataframe.core.DataFrame:
str_format = ''
if ts_column in dft.columns:
print(' %s column exists in dask data frame...' %ts_column)
str_first_value = dft[ts_column].compute()[0]
dft.index = dd.to_datetime(dft[ts_column].compute())
dft = dft.drop(ts_column, axis=1)
elif ts_column in dft.index.name:
print(' train index %s is already a time series index. Continuing...' %ts_column)
else:
print(f" (Error) Model to be used for prediction 'ML'. Hence, input df must have a column (or index) called '{ts_column}' corresponding to the original ts_index column passed during training. No predictions will be made.")
return None
else:
print(' Unable to detect type of data. Please check your input and try again')
return
return dft, str_format
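# Illustrative sketch (added; column names are hypothetical). A frame whose 'ds' column
# already holds datetimes comes back with 'ds' moved into a DatetimeIndex:
# small = pd.DataFrame({'ds': pd.to_datetime(['2021-01-01', '2021-01-02']), 'y': [1.0, 2.0]})
# small, fmt = change_to_datetime_index(small, 'ds')
# small.index   # -> DatetimeIndex(['2021-01-01', '2021-01-02'], ...)
# fmt           # -> '' (no string parsing was needed)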
############################################################################################################
def change_to_datetime_index_test(testdata, ts_column, str_format=''):
testdata = copy.deepcopy(testdata)
if str_format:
print('Date_time string format given as %s' %str_format)
else:
        print('    Alert: No strftime format given for %s. Provide an strftime format during "setup" for better results.' %ts_column)
##### This is where we change the time index of test data #############
try:
if isinstance(testdata, pd.Series) or isinstance(testdata, pd.DataFrame):
if ts_column in testdata.columns:
###### If the str_format is there, set the column as time series index ##
ts_index = testdata.pop(ts_column)
if str_format:
ts_index = pd.to_datetime(ts_index, format=str_format)
else:
                    ts_index = pd.to_datetime(ts_index)
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Creates graphs and summary information from event logs.
This was created to build the graphs and supporting data for the README pages
for each agent. There are a number of FLAGS but the script is not designed to be
fully configurable. Making changes directly in the script is expected for one
off needs.
Usage examples:
python3 graph_builder.py --eventlog=<path to event log 1> \
--eventlog=<path to event log 2>
"""
import csv
import datetime
import enum
import os
from typing import Dict, List, Optional, Sequence, Tuple, Union
from absl import app
from absl import flags
from absl import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.util import event_pb2 # TF internal
from tensorflow.python.lib.io import tf_record # TF internal
# pylint: enable=g-direct-tensorflow-import
FLAGS = flags.FLAGS
flags.DEFINE_multi_string('eventlog', None,
                          'Directory where eventlog is stored.')
flags.DEFINE_string('output_path', '.',
'Path to store the graph and any other associated data.')
flags.DEFINE_string('output_prefix', 'results', 'Prefix used for artifacts')
flags.DEFINE_string('graph_title', '', 'Title for the graph.')
flags.DEFINE_string('graph_xaxis_title', 'steps', 'Title for the x-axis.')
flags.DEFINE_string('graph_yaxis_title', 'AverageReturn',
'Title for the y-axis or event_name is used.')
flags.DEFINE_string('event_name', 'AverageReturn', 'Name of event to track.')
flags.DEFINE_integer('end_step', None,
'If set, processing of the event log ends on this step.')
flags.DEFINE_boolean('show_graph', False, 'If true, show graph in a window.')
class GraphAggTypes(enum.Enum):
"""Enum of options to aggregate data when generating a graph."""
MEAN = 'mean'
MEDIAN = 'median'
flags.DEFINE_enum_class('graph_agg', GraphAggTypes.MEAN, GraphAggTypes,
'Method to aggregate data for the graph.')
Number = Union[int, float]
class StatsBuilder(object):
"""Builds graphs and other summary information from eventlogs."""
def __init__(self,
eventlog_dirs: List[str],
event_tag: str,
output_path: str = '.',
title: str = '',
xaxis_title: str = 'steps',
yaxis_title: Optional[str] = None,
graph_agg: GraphAggTypes = GraphAggTypes.MEAN,
output_prefix: str = 'results',
end_step: Optional[int] = None,
show_graph: bool = False):
"""Initializes StatsBuilder class.
Args:
eventlog_dirs: List of paths to event log directories to process.
event_tag: Event to extract from the logs.
output_path: Output path for artifacts, e.g. graphs and cvs files.
title: Title of the graph.
xaxis_title: Title for x-axis of the graph. Defaults to "steps".
yaxis_title: Title for the y-axis. Defaults to the `event_tag`.
graph_agg: Aggregation for the graph.
output_prefix: Prefix for the artifact files. Defaults to "results".
end_step: If set, processing of the event log ends on this step.
show_graph: If true, blocks and shows graph. Only tests in linux.
Raises:
ValueError: Raised if the graph_agg passed is not understood.
"""
self.eventlog_dirs = eventlog_dirs
self.event_tag = event_tag
self.output_path = output_path
self.title = title
self.xaxis_title = xaxis_title
self.show_graph = show_graph
self.end_step = end_step
if graph_agg == GraphAggTypes.MEAN:
self.graph_agg = np.mean
elif graph_agg == GraphAggTypes.MEDIAN:
self.graph_agg = np.median
else:
raise ValueError('Unknown graph_agg:{}'.format(graph_agg))
# Makes the output path absolute for clarity.
self.output_dir = os.path.abspath(output_path)
os.makedirs(self.output_dir, exist_ok=True)
self.output_prefix = output_prefix
if yaxis_title:
self.yaxis_title = yaxis_title
else:
self.yaxis_title = event_tag
def _summary_iterator(self, path):
for record in tf_record.tf_record_iterator(path):
yield event_pb2.Event.FromString(record)
def _extract_values(
self, event_log_dir: str) -> Tuple[Dict[int, np.generic], float]:
"""Extracts the event values for the `event_tag` and total wall time.
Args:
event_log_dir: Path to the event log directory.
Returns:
      Tuple with a dict mapping each step (int) to the event value, and the total
      walltime in minutes.
Raises:
ValueError: If no events are found or the final step is smaller than the
`end_step` requested.
FileNotFoundError: If an event log is not found in the event log
directory.
"""
start_step = 0
current_step = 0
start_time = 0
max_wall_time = 0.0
event_log_path = os.path.join(event_log_dir, 'events.out.tfevents.*')
# In OSS tf.io.gfile.glob throws `NotFoundError` vs returning an empty list.
# Catching `NotFoundError` and doing the check yields a consistent message.
try:
event_files = tf.io.gfile.glob(event_log_path)
except tf.errors.NotFoundError:
event_files = []
if not event_files:
raise FileNotFoundError(
f'No files found matching pattern:{event_log_path}')
assert len(event_files) == 1, (
        'Found {} event files({}) matching "{}" pattern and expected 1.'
.format(len(event_files), ','.join(event_files), event_log_path))
event_file = event_files[0]
logging.info('Processing event file: %s', event_file)
event_values = {}
for summary in self._summary_iterator(event_file):
current_step = summary.step
logging.debug('Event log item: %s', summary)
for value in summary.summary.value:
if value.tag == self.event_tag:
ndarray = tf.make_ndarray(value.tensor)
event_values[summary.step] = ndarray.item(0)
if current_step == start_step:
start_time = summary.wall_time
logging.info(
'training start (step %d): %s', current_step,
datetime.datetime.fromtimestamp(
summary.wall_time).strftime('%Y-%m-%d %H:%M:%S.%f'))
max_wall_time = summary.wall_time
if self.end_step and summary.step >= self.end_step:
break
if not start_time:
raise ValueError(
'Error: Starting event not found. Check arg event_name and '
'warmup_steps. Possible no events were found.')
if self.end_step and current_step < self.end_step:
raise ValueError(
'Error: Final step was less than the requested end_step.')
elapse_time = (max_wall_time - start_time) / 60
logging.info(
'training end (step %d): %s', current_step,
datetime.datetime.fromtimestamp(max_wall_time).strftime(
'%Y-%m-%d %H:%M:%S.%f'))
logging.info('elapsed time:%dm', elapse_time)
return event_values, elapse_time
def _gather_data(self) -> Tuple[List[Dict[int, np.generic]], List[float]]:
"""Gather data from all of the logs and add to the data_collector list.
Returns:
Tuple of arrays indexed by log file, e.g. data_collector[0] is all of the
values found in the event log for the given event and walltimes[0] is the
total time in minutes it took to get to the end_step in that event log.
"""
data_collector, walltimes = [], []
for eventlog_dir in self.eventlog_dirs:
data, total_time = self._extract_values(eventlog_dir)
walltimes.append(total_time)
data_collector.append(data)
return data_collector, walltimes
def _align_and_aggregate(
self, data_collector: List[Dict[int,
np.generic]]) -> List[Sequence[Number]]:
"""Combines data from multipole runs into a pivot table like structure.
Uses the first run as the base and aligns the data for each run by rows
with each row representing a step. If a step is not found in a run,
the value -1 is used. No error or warning is thrown or logged.
Args:
data_collector: list of dicts with each dict representing a run most
likely extracted from an event log.
Returns:
2d array with each row representing a step and each run represented as
a column, e.g. step, run 1, run 2, median, and mean.
"""
# Use the first event log's steps as the base and create aggregated data
# at the step internals of the first event log.
base_data = data_collector[0]
agg_data = []
for key, value in sorted(base_data.items()):
entry = [key]
values = [value]
for data in data_collector[1:]:
values.append(data.get(key, -1))
mean_val = np.mean(values)
median_val = np.median(values)
# Combines into step, values 1..n, median, and mean.
values.append(median_val)
values.append(mean_val)
entry += values
agg_data.append(entry)
return agg_data
def _output_csv(self, agg_data: List[Sequence[Number]]):
"""Exports the `agg_data` as a csv.
Args:
agg_data: 2d array of data to export to csv.
"""
# Outputs csv with aggregated data for each step.
csv_path = os.path.join(self.output_path,
self.output_prefix + '_summary.csv')
with open(csv_path, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerows(agg_data)
def _output_graph(self, agg_data: List[Sequence[Number]], num_runs: int):
"""Builds a graph of the results and outputs to a .png.
Args:
agg_data: 2d array of data to be graphed.
num_runs: Number of columns of runs in the data.
"""
# Build data frames
columns = ['step']
columns.extend([str(i) for i in range(num_runs)])
# csv contains aggregate info that will get excluded in the pd.melt.
columns.extend(['median', 'mean'])
print(columns)
    df = pd.DataFrame(agg_data, columns=columns)
# -*- coding: utf-8 -*-
import pandas as pd
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain.quotes.bond import Bond1dKdata
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_time_str, now_pd_timestamp, TIME_FORMAT_DAY
try:
from EmQuantAPI import *
except:
pass
class EmChinaBondKdataRecorder(Recorder):
data_schema = Bond1dKdata
provider = 'emquantapi'
def __init__(self, batch_size=10, force_update=True, sleeping_time=10) -> None:
super().__init__(batch_size, force_update, sleeping_time)
        # Call the EmQuant login function (once activated, no username/password is required)
loginResult = c.start("ForceLogin=1", '')
if (loginResult.ErrorCode != 0):
print("login in fail")
exit()
def run(self):
from zvt.api import get_kdata
bond_data = get_kdata(entity_id='bond_cn_EMM00166466')
now_date = to_time_str(now_pd_timestamp())
if bond_data.empty:
            # Start from 2007 on the first run
start = '2007-01-01'
else:
start = to_time_str(bond_data.timestamp.max())
        # EMM00166466: ChinaBond 10-year government bond yield to maturity
df = c.edb("EMM00166466", f"IsLatest=0,StartDate={start},EndDate={now_date},ispandas=1")
if pd_is_not_null(df):
df['name'] = "中债国债到期收益率:10年"
df.rename(columns={'RESULT': 'data_value', 'DATES': 'timestamp'}, inplace=True)
df['entity_id'] = 'bond_cn_EMM00166466'
            df['timestamp'] = pd.to_datetime(df['timestamp'])
# coding=utf-8
import torch
import re
import pandas as pd
import json
from torch.nn.utils.rnn import pad_sequence
from seqeval.metrics import precision_score, recall_score, f1_score
from torch.utils.data import Dataset
def evaluate_(output, labels, ignore_idx):
### ignore index 0 (padding) when calculating accuracy
idxs = (labels != ignore_idx).squeeze()
o_labels = torch.softmax(output, dim=1).max(1)[1]
l = labels.squeeze()[idxs]; o = o_labels[idxs]
if len(idxs) > 1:
acc = (l == o).sum().item()/len(idxs)
else:
acc = (l == o).sum().item()
l = l.cpu().numpy().tolist() if l.is_cuda else l.numpy().tolist()
o = o.cpu().numpy().tolist() if o.is_cuda else o.numpy().tolist()
return acc, (o, l)
def evaluate_results(net, test_loader, pad_id, cuda):
print("Evaluating test samples...")
acc = 0; out_labels = []; true_labels = []
net.eval()
with torch.no_grad():
for i, data in enumerate(test_loader):
x, e1_e2_start, labels, _,_,_ = data
attention_mask = (x != pad_id).float()
token_type_ids = torch.zeros((x.shape[0], x.shape[1])).long()
if cuda:
x = x.cuda()
labels = labels.cuda()
attention_mask = attention_mask.cuda()
token_type_ids = token_type_ids.cuda()
classification_logits = net(x, token_type_ids=token_type_ids, attention_mask=attention_mask, Q=None,\
e1_e2_start=e1_e2_start)
accuracy, (o, l) = evaluate_(classification_logits, labels, ignore_idx=-1)
out_labels.append([str(i) for i in o]); true_labels.append([str(i) for i in l])
acc += accuracy
accuracy = acc/(i + 1)
results = {
"accuracy": accuracy,
"precision": precision_score(true_labels, out_labels),
"recall": recall_score(true_labels, out_labels),
"f1": f1_score(true_labels, out_labels)
}
print("***** Eval results *****")
for key in sorted(results.keys()):
print(" %s = %s" % (key, str(results[key])))
return results
def process_text(text, mode='train'):
sents, relations, comments, blanks = [], [], [], []
for i in range(int(len(text)/4)):
sent = text[4*i]
relation = text[4*i + 1]
comment = text[4*i + 2]
blank = text[4*i + 3]
# check entries
if mode == 'train':
assert int(re.match("^\d+", sent)[0]) == (i + 1)
else:
assert (int(re.match("^\d+", sent)[0]) - 8000) == (i + 1)
assert re.match("^Comment", comment)
assert len(blank) == 1
sent = re.findall("\"(.+)\"", sent)[0]
sent = re.sub('<e1>', '[E1]', sent)
sent = re.sub('</e1>', '[/E1]', sent)
sent = re.sub('<e2>', '[E2]', sent)
sent = re.sub('</e2>', '[/E2]', sent)
sents.append(sent); relations.append(relation), comments.append(comment); blanks.append(blank)
return sents, relations, comments, blanks
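# Worked example of the expected SemEval-2010 Task 8 layout (four lines per instance);
# the sentence below is invented for illustration:
# sample = [
#     '1\t"The <e1>company</e1> fabricates plastic <e2>chairs</e2>."',
#     'Product-Producer(e2,e1)',
#     'Comment:',
#     '\n',
# ]
# sents, relations, comments, blanks = process_text(sample, mode='train')
# sents[0]   # -> 'The [E1]company[/E1] fabricates plastic [E2]chairs[/E2].'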
class Relations_Mapper(object):
def __init__(self, relations):
self.rel2idx = {}
self.idx2rel = {}
print("Mapping relations to IDs...")
self.n_classes = 0
for relation in relations:
if relation not in self.rel2idx.keys():
self.rel2idx[relation] = self.n_classes
self.n_classes += 1
for key, value in self.rel2idx.items():
self.idx2rel[value] = key
class Pad_Sequence():
"""
collate_fn for dataloader to collate sequences of different lengths into a fixed length batch
Returns padded x sequence, y sequence, x lengths and y lengths of batch
"""
def __init__(self, seq_pad_value, label_pad_value=-1, label2_pad_value=-1,\
):
self.seq_pad_value = seq_pad_value
self.label_pad_value = label_pad_value
self.label2_pad_value = label2_pad_value
def __call__(self, batch):
sorted_batch = sorted(batch, key=lambda x: x[0].shape[0], reverse=True)
seqs = [x[0] for x in sorted_batch]
seqs_padded = pad_sequence(seqs, batch_first=True, padding_value=self.seq_pad_value)
x_lengths = torch.LongTensor([len(x) for x in seqs])
labels = list(map(lambda x: x[1], sorted_batch))
labels_padded = pad_sequence(labels, batch_first=True, padding_value=self.label_pad_value)
y_lengths = torch.LongTensor([len(x) for x in labels])
labels2 = list(map(lambda x: x[2], sorted_batch))
labels2_padded = pad_sequence(labels2, batch_first=True, padding_value=self.label2_pad_value)
y2_lengths = torch.LongTensor([len(x) for x in labels2])
return seqs_padded, labels_padded, labels2_padded, x_lengths, y_lengths, y2_lengths
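# Sketch of how this collate_fn is typically plugged into a DataLoader (the dataset and
# tokenizer names below are assumed to exist elsewhere in the training script):
# from torch.utils.data import DataLoader
# PS = Pad_Sequence(seq_pad_value=tokenizer.pad_token_id)
# train_loader = DataLoader(train_set, batch_size=32, shuffle=True, collate_fn=PS)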
class semeval_dataset(Dataset):
def __init__(self, df, tokenizer, e1_id, e2_id):
self.e1_id = e1_id
self.e2_id = e2_id
self.df = df
print("Tokenizing data...")
self.df['input'] = self.df.apply(lambda x: tokenizer.encode(x['sents']), axis=1)
def get_e1e2_start(x, e1_id, e2_id):
e1_e2_start = ([i for i, e in enumerate(x) if e == e1_id][0],\
[i for i, e in enumerate(x) if e == e2_id][0])
return e1_e2_start
self.df['e1_e2_start'] = self.df.apply(lambda x: get_e1e2_start(x['input'], e1_id=self.e1_id, e2_id=self.e2_id), axis=1)
def __len__(self,):
return len(self.df)
def __getitem__(self, idx):
return torch.LongTensor(self.df.iloc[idx]['input']),\
torch.LongTensor(self.df.iloc[idx]['e1_e2_start']),\
torch.LongTensor([self.df.iloc[idx]['relations_id']])
def preprocess_semeval2010_8(train_data, test_data):
'''
Data preprocessing for SemEval2010 task 8 dataset
'''
data_path = train_data
print("Reading training file %s..." % data_path)
with open(data_path, 'r', encoding='utf8') as f:
text = f.readlines()
sents, relations, comments, blanks = process_text(text, 'train')
df_train = pd.DataFrame(data={'sents': sents, 'relations': relations})
data_path = test_data
print("Reading test file %s..." % data_path)
with open(data_path, 'r', encoding='utf8') as f:
text = f.readlines()
sents, relations, comments, blanks = process_text(text, 'test')
df_test = pd.DataFrame(data={'sents': sents, 'relations': relations})
rm = Relations_Mapper(df_train['relations'])
df_test['relations_id'] = df_test.apply(lambda x: rm.rel2idx[x['relations']], axis=1)
df_train['relations_id'] = df_train.apply(lambda x: rm.rel2idx[x['relations']], axis=1)
return df_train, df_test, rm
def detokenize(text, h, t):
text_with_ents = []
for i, token in enumerate(text):
if i==h['pos'][0]:
text_with_ents.append('[E1]')
if i==h['pos'][1]:
text_with_ents.append('[/E1]')
if i==t['pos'][0]:
text_with_ents.append('[E2]')
if i==t['pos'][1]:
text_with_ents.append('[/E2]')
text_with_ents.append(token)
return ' '.join(text_with_ents)
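# Worked example (tokens invented): entity spans are half-open [start, end) token positions,
# and the [E1]/[E2] tags are spliced in around them.
# detokenize(['Bill', 'Gates', 'founded', 'Microsoft', '.'], {'pos': [0, 2]}, {'pos': [3, 4]})
# -> '[E1] Bill Gates [/E1] founded [E2] Microsoft [/E2] .'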
def process_wiki80_text(text):
sents, relations = [], []
for i in range(len(text)):
text_obj = json.loads(text[i])
sent = detokenize(text_obj['token'], text_obj['h'], text_obj['t'])
relation = text_obj['relation']
sents.append(sent); relations.append(relation)
return sents, relations
def preprocess_wiki80(train_data, test_data):
'''
    Data preprocessing for the wiki80 relation classification dataset
'''
data_path = train_data
print("Reading training file %s..." % data_path)
with open(data_path, 'r', encoding='utf8') as f:
text = f.readlines()
sents, relations = process_wiki80_text(text)
    df_train = pd.DataFrame(data={'sents': sents, 'relations': relations})
# The goal of this file is to implement the AdaBoost algorithm with decision-stump weak learners
import pandas as pd
import numpy as np
import os
import time
import matplotlib.pyplot as plt
import multiprocessing
from joblib import Parallel, delayed
t0 = time.time()
def beta_cal(epsolon):
beta = 1/((1-epsolon)/epsolon)
return beta
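# e.g. a weak learner with weighted error epsolon = 0.2 gives beta = 1/((1-0.2)/0.2) = 0.25,
# so examples it already classifies correctly are down-weighted (multiplied by 0.25) next round.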
def weight_cal(Distribution,label,prediction,error,gama):
e_p = ((prediction == label) & (label == 1)).astype(int)
e_n = ((prediction == label) & (label == -1)).astype(int)
e = gama*e_p + (1-gama)*e_n
epsolan = sum(Distribution*(1-(e_p+e_n)))
beta = beta_cal(epsolan)
Distribution_new = Distribution*beta**(e_p+e_n)
Distribution_new = gama*Distribution_new[1 == label] + (1-gama)*Distribution_new[-1 == label]
Distribution_new = Distribution_new/sum(Distribution_new)
print(sum(abs(Distribution_new-Distribution)))
return Distribution_new,beta
def decision_stamp_search(list_data, F_star=float('inf')):
    # scan every (feature index, sorted values, labels, weights) tuple for the
    # threshold that minimises the weighted error F
    for i in range(len(list_data)):
        j = list_data[i][0]
        Xj = list_data[i][1]
        Yj = list_data[i][2]
        Dj = list_data[i][3]
        n_rows = len(Xj)
        F = sum(Dj[Yj == 1])
        if F < F_star:
            F_star = F
            theta_star = Xj[0]-1
            j_star = j
        for k in range(0, n_rows-1):
            F = F - Yj[k]*Dj[k]
            if ((F < F_star) & (Xj[k] != Xj[k+1])):
                F_star = F
                theta_star = 0.5*((Xj[k] + Xj[k+1]))
                j_star = j
    return (j_star, theta_star)
def parllel_sort(S_np,j,row):
X_np = S_np[:,:-2]
Sort = S_np[:,j].argsort()
Xj = (S_np[:,j])[Sort]
Yj = (S_np[:,-2])[Sort]
Dj = (S_np[:,-1])[Sort]
return(j,Xj,Yj,Dj)
def decision_stamp(S,Distribution):
    temp = time.time()
    F_star = float('inf')
    X = S.drop(columns = ['Label','Distribution'])
    S_np = np.array(S)
    # write the current boosting weights into the last (Distribution) column
    S_np[:,-1] = Distribution
    print(time.time()-temp)
    [row,col] = S_np.shape
num_cores = multiprocessing.cpu_count()
processed_list = Parallel(n_jobs=num_cores)(delayed(parllel_sort)(S_np,j,row) for j in range(col-2))
[j_star,theta_star] = decision_stamp_search(processed_list,F_star)
return (j_star,theta_star)
def error_calcuator(prediction,label):
error = sum(prediction != label)/len(label)
error_2 = sum((prediction == 1)& (label == -1))/len(label)
error_3 = sum((prediction == -1)& (label == 1))/len(label)
return error,error_2,error_3
def ada_boost(S,y,rounds,gama):
beta_list = []
j_of_round = []
e_t = []
theta = []
parity_tol = []
new_Distribution = S['Distribution']
for i in range(0,rounds):
[J_star,theta_star] = decision_stamp(S,new_Distribution)
prediction = 2*(S[S.columns[J_star]]>=theta_star).astype(int) - 1
[e1, e2,e3] = error_calcuator(prediction,y)
if e1 <= 0.5:
parity = 1
else :
parity = -1
prediction = 2*(S[S.columns[J_star]]<=theta_star).astype(int) - 1
parity_tol.append(parity)
[e1, e2,e3] = error_calcuator(prediction,y)
[new_Distribution,beta] = weight_cal(new_Distribution,y,prediction,e1,gama)
beta_list.append(beta)
j_of_round.append(J_star)
theta.append(theta_star)
e_t.append([e1,e2,e3])
e_t = np.array(e_t)
e_t.resize(rounds,3)
return (np.array(beta_list), np.array(j_of_round), np.array(e_t), np.array(theta), np.array(parity_tol))
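# Rough usage sketch (added; names are hypothetical). `S` is a dataframe whose last two
# columns are 'Label' (+1/-1) and 'Distribution', e.g. as built by df_maker below:
# betas, stump_features, round_errors, thresholds, parities = ada_boost(S, S['Label'], rounds=10, gama=0.5)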
def df_maker(S):
col = []
for names in S.columns:
col.append(str(names))
col[-1] = 'Label'
col = col + ['Distribution']
train_y = S[S.columns[-1]]
P_count = train_y[train_y == 1].count()
N_count = len(train_y) - P_count
Distribution = np.array([1/(2*P_count)]*P_count + [1/(2*N_count)]*N_count)
    S = S.merge(pd.Series(Distribution)
import collections
from datetime import timedelta
from io import StringIO
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import needs_i8_conversion
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Interval,
IntervalIndex,
Series,
Timedelta,
TimedeltaIndex,
)
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_value_counts(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.value_counts()
counter = collections.Counter(obj)
expected = Series(dict(counter.most_common()), dtype=np.int64, name=obj.name)
expected.index = expected.index.astype(obj.dtype)
if isinstance(obj, pd.MultiIndex):
expected.index = Index(expected.index)
# TODO: Order of entries with the same count is inconsistent on CI (gh-32449)
if obj.duplicated().any():
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_null(null_obj, index_or_series_obj):
orig = index_or_series_obj
obj = orig.copy()
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(orig, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
counter = collections.Counter(obj.dropna())
expected = Series(dict(counter.most_common()), dtype=np.int64)
expected.index = expected.index.astype(obj.dtype)
result = obj.value_counts()
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# can't use expected[null_obj] = 3 as
# IntervalIndex doesn't allow assignment
new_entry = Series({np.nan: 3}, dtype=np.int64)
expected = expected.append(new_entry)
result = obj.value_counts(dropna=False)
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_value_counts_inferred(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
def test_value_counts_bins(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
msg = "bins argument only works with numeric data"
with pytest.raises(TypeError, match=msg):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
    exp1 = Series({Interval(0.997, 3.0): 4})
# -*- coding: utf-8 -*-
'''
This code generates Fig. 1
Trend of global mean surface temperature and anthropogenic aerosol emissions
by <NAME> (<EMAIL>)
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import _env
import seaborn.apionly as sns
import matplotlib
from scipy import stats
from statsmodels.tsa.stattools import adfuller as ADF
from statsmodels.tsa.stattools import acf
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Helvetica'
#import matplotlib
nens = _env.nens
parameters_info = _env.parameters_info
scenarios = _env.scenarios
par = 'TREFHT'
scen_aero = scenarios[1]
scen_base = scenarios[0]
if_temp = _env.odir_root + '/' + parameters_info[par]['dir']+ '/Global_Mean_' + par + '_1850-2019_ensembles' + '.xls'
if_temp_pi = _env.odir_root + '/' + parameters_info[par]['dir']+ '/Global_Mean_Temperature_pre-industrial_110yrs.xls'
odir_plot = _env.odir_root + '/plot/'
_env.mkdirs(odir_plot)
of_plot = odir_plot + 'F1.Trend_Temp_Emission.png'
itbl_temp = pd.read_excel(if_temp,index_col=0)
itbl_temp_pi = pd.read_excel(if_temp_pi,index_col=0)
# encoding: utf-8
# (c) 2017-2019 Open Risk (https://www.openriskmanagement.com)
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
""" Converter utilities to help switch between various formats """
import pandas as pd
def datetime_to_float(dataframe):
"""
.. _Datetime_to_float:
Converts dates from string format to the canonical float format
:param dataframe: Pandas dataframe with dates in string format
:return: Pandas dataframe with dates in float format
:rtype: object
.. note:: The date string must be recognizable by the pandas to_datetime function.
"""
start_date = dataframe['Time'].min()
end_date = dataframe['Time'].max()
total_days = (pd.to_datetime(end_date) - pd.to_datetime(start_date)).days
dataframe['Time'] = dataframe['Time'].apply(
        lambda x: (pd.to_datetime(x) - pd.to_datetime(start_date)).days / total_days)
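# Short sketch (added; assumes the normalisation by total_days shown above): dates are
# mapped onto [0, 1] as the fraction of the observation window that has elapsed.
# frame = pd.DataFrame({'Time': ['2020-01-01', '2020-07-02', '2021-01-01']})
# datetime_to_float(frame)
# frame['Time']   # -> approximately 0.0, 0.5, 1.0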
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import json
import pandas
import matplotlib
import modin.pandas as pd
from numpy.testing import assert_array_equal
import sys
from modin.pandas.utils import to_pandas
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_string_data_values,
test_string_data_keys,
test_string_list_data_values,
test_string_list_data_keys,
string_sep_values,
string_sep_keys,
string_na_rep_values,
string_na_rep_keys,
numeric_dfs,
no_numeric_dfs,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def get_rop(op):
if op.startswith("__") and op.endswith("__"):
return "__r" + op[2:]
else:
return None
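# e.g. get_rop("__add__") -> "__radd__", while a plain name like "add" maps to None.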
def inter_df_math_helper(modin_series, pandas_series, op):
inter_df_math_helper_one_side(modin_series, pandas_series, op)
rop = get_rop(op)
if rop:
inter_df_math_helper_one_side(modin_series, pandas_series, rop)
def inter_df_math_helper_one_side(modin_series, pandas_series, op):
try:
pandas_attr = getattr(pandas_series, op)
except Exception as e:
with pytest.raises(type(e)):
_ = getattr(modin_series, op)
return
modin_attr = getattr(modin_series, op)
try:
pandas_result = pandas_attr(4)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(4)) # repr to force materialization
else:
modin_result = modin_attr(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_attr(4.0)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(4.0)) # repr to force materialization
else:
modin_result = modin_attr(4.0)
df_equals(modin_result, pandas_result)
# These operations don't support non-scalar `other` or have a strange behavior in
# the testing environment
if op in [
"__divmod__",
"divmod",
"rdivmod",
"floordiv",
"__floordiv__",
"rfloordiv",
"__rfloordiv__",
"mod",
"__mod__",
"rmod",
"__rmod__",
]:
return
try:
pandas_result = pandas_attr(pandas_series)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(modin_series)) # repr to force materialization
else:
modin_result = modin_attr(modin_series)
df_equals(modin_result, pandas_result)
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_series.shape[0]))
try:
pandas_result = pandas_attr(list_test)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(list_test)) # repr to force materialization
else:
modin_result = modin_attr(list_test)
df_equals(modin_result, pandas_result)
series_test_modin = pd.Series(list_test, index=modin_series.index)
series_test_pandas = pandas.Series(list_test, index=pandas_series.index)
try:
pandas_result = pandas_attr(series_test_pandas)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(series_test_modin)) # repr to force materialization
else:
modin_result = modin_attr(series_test_modin)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_series.index]
)
modin_df_multi_level = modin_series.copy()
modin_df_multi_level.index = new_idx
try:
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, level=1)
except TypeError:
# Some operations don't support multilevel `level` parameter
pass
def create_test_series(vals):
if isinstance(vals, dict):
modin_series = pd.Series(vals[next(iter(vals.keys()))])
pandas_series = pandas.Series(vals[next(iter(vals.keys()))])
elif isinstance(vals, list):
modin_series = pd.Series(vals)
pandas_series = pandas.Series(vals)
return modin_series, pandas_series
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_frame(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.to_frame(name="miao"), pandas_series.to_frame(name="miao"))
def test_accessing_index_element_as_property():
s = pd.Series([10, 20, 30], index=["a", "b", "c"])
assert s.b == 20
with pytest.raises(Exception):
_ = s.d
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_callable_key_in_getitem(data):
modin_series, pandas_series = create_test_series(data)
df_equals(
modin_series[lambda s: s.index % 2 == 0],
pandas_series[lambda s: s.index % 2 == 0],
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_T(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.T, pandas_series.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___abs__(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.__abs__(), pandas_series.__abs__())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___and__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__and__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___array__(data):
modin_series, pandas_series = create_test_series(data)
modin_result = modin_series.__array__()
assert_array_equal(modin_result, pandas_series.__array__())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___bool__(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.__bool__()
except Exception as e:
with pytest.raises(type(e)):
modin_series.__bool__()
else:
modin_result = modin_series.__bool__()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___contains__(request, data):
modin_series, pandas_series = create_test_series(data)
result = False
key = "Not Exist"
assert result == modin_series.__contains__(key)
assert result == (key in modin_series)
if "empty_data" not in request.node.name:
result = True
key = pandas_series.keys()[0]
assert result == modin_series.__contains__(key)
assert result == (key in modin_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___copy__(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.copy(), modin_series)
df_equals(modin_series.copy(), pandas_series.copy())
df_equals(modin_series.copy(), pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___deepcopy__(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.__deepcopy__(), modin_series)
df_equals(modin_series.__deepcopy__(), pandas_series.__deepcopy__())
df_equals(modin_series.__deepcopy__(), pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___delitem__(data):
modin_series, pandas_series = create_test_series(data)
del modin_series[modin_series.index[0]]
del pandas_series[pandas_series.index[0]]
df_equals(modin_series, pandas_series)
del modin_series[modin_series.index[-1]]
del pandas_series[pandas_series.index[-1]]
df_equals(modin_series, pandas_series)
del modin_series[modin_series.index[0]]
del pandas_series[pandas_series.index[0]]
df_equals(modin_series, pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divmod(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "divmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdivmod(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "rdivmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___eq__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__eq__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___ge__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__ge__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getitem__(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series[0], pandas_series[0])
df_equals(
modin_series[modin_series.index[-1]], pandas_series[pandas_series.index[-1]]
)
modin_series = pd.Series(list(range(1000)))
pandas_series = pandas.Series(list(range(1000)))
df_equals(modin_series[:30], pandas_series[:30])
df_equals(modin_series[modin_series > 500], pandas_series[pandas_series > 500])
# Test empty series
df_equals(pd.Series([])[:30], pandas.Series([])[:30])
def test___getitem__1383():
# see #1383 for more details
data = ["", "a", "b", "c", "a"]
modin_series = pd.Series(data)
pandas_series = pandas.Series(data)
df_equals(modin_series[3:7], pandas_series[3:7])
@pytest.mark.parametrize("start", [-7, -5, -3, 0, None, 3, 5, 7])
@pytest.mark.parametrize("stop", [-7, -5, -3, 0, None, 3, 5, 7])
def test___getitem_edge_cases(start, stop):
data = ["", "a", "b", "c", "a"]
modin_series = pd.Series(data)
pandas_series = pandas.Series(data)
df_equals(modin_series[start:stop], pandas_series[start:stop])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___gt__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__gt__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___int__(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = int(pandas_series[0])
except Exception as e:
with pytest.raises(type(e)):
int(modin_series[0])
else:
assert int(modin_series[0]) == pandas_result
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___invert__(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.__invert__()
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.__invert__())
else:
df_equals(modin_series.__invert__(), pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___iter__(data):
modin_series, pandas_series = create_test_series(data)
for m, p in zip(modin_series.__iter__(), pandas_series.__iter__()):
np.testing.assert_equal(m, p)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___le__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__le__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___len__(data):
modin_series, pandas_series = create_test_series(data)
assert len(modin_series) == len(pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___long__(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series[0].__long__()
except Exception as e:
with pytest.raises(type(e)):
modin_series[0].__long__()
else:
assert modin_series[0].__long__() == pandas_result
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___lt__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__lt__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___ne__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__ne__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___neg__(request, data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.__neg__()
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.__neg__())
else:
df_equals(modin_series.__neg__(), pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___or__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__or__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___repr__(data):
modin_series, pandas_series = create_test_series(data)
assert repr(modin_series) == repr(pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___round__(data):
modin_series, pandas_series = create_test_series(data)
df_equals(round(modin_series), round(pandas_series))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___setitem__(data):
modin_series, pandas_series = create_test_series(data)
for key in modin_series.keys():
modin_series[key] = 0
pandas_series[key] = 0
df_equals(modin_series, pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sizeof__(data):
modin_series, pandas_series = create_test_series(data)
with pytest.warns(UserWarning):
modin_series.__sizeof__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___str__(data):
modin_series, pandas_series = create_test_series(data)
assert str(modin_series) == str(pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___xor__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__xor__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.abs(), pandas_series.abs())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(data):
modin_series, pandas_series = create_test_series(data)
df_equals(
modin_series.add_prefix("PREFIX_ADD_"), pandas_series.add_prefix("PREFIX_ADD_")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(data):
modin_series, pandas_series = create_test_series(data)
df_equals(
modin_series.add_suffix("SUFFIX_ADD_"), pandas_series.add_suffix("SUFFIX_ADD_")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(data, func):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.agg(func)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.agg(func))
else:
modin_result = modin_series.agg(func)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(request, data, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
axis = 0
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_series.agg(func, axis)
else:
modin_result = modin_series.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(request, data, func):
axis = 0
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.aggregate(func, axis))
else:
modin_result = modin_series.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(request, data, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
axis = 0
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_series.agg(func, axis)
else:
modin_result = modin_series.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(data):
modin_series, _ = create_test_series(data) # noqa: F841
assert modin_series.aggregate("ndim") == 1
with pytest.warns(UserWarning):
modin_series.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_series.aggregate("NOT_EXISTS")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_align(data):
modin_series, _ = create_test_series(data) # noqa: F841
with pytest.warns(UserWarning):
modin_series.align(modin_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_all(data, skipna):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.all(skipna=skipna), pandas_series.all(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_any(data, skipna):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.any(skipna=skipna), pandas_series.any(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(data):
modin_series, pandas_series = create_test_series(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_series.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_series.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_series.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_series.append(pandas_series.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_series.append(modin_series.iloc[-1])
else:
modin_result = modin_series.append(modin_series.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_series.append([pandas_series.iloc[-1]])
except Exception as e:
with pytest.raises(type(e)):
modin_series.append([modin_series.iloc[-1]])
else:
modin_result = modin_series.append([modin_series.iloc[-1]])
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_series.append(
[pandas_series, pandas_series], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_series.append(
[modin_series, modin_series], verify_integrity=verify_integrity
)
else:
modin_result = modin_series.append(
[modin_series, modin_series], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_series.append(
pandas_series, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_series.append(modin_series, verify_integrity=verify_integrity)
else:
modin_result = modin_series.append(
modin_series, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(request, data, func):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.apply(func)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.apply(func))
else:
modin_result = modin_series.apply(func)
df_equals(modin_result, pandas_result)
def test_apply_external_lib():
json_string = """
{
"researcher": {
"name": "<NAME>",
"species": "Betelgeusian",
"relatives": [
{
"name": "<NAME>",
"species": "Betelgeusian"
}
]
}
}
"""
modin_result = pd.DataFrame.from_dict({"a": [json_string]}).a.apply(json.loads)
pandas_result = pandas.DataFrame.from_dict({"a": [json_string]}).a.apply(json.loads)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply_numeric(request, data, func):
modin_series, pandas_series = create_test_series(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_series.apply(func)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.apply(func))
else:
modin_result = modin_series.apply(func)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("skipna", [True, False])
def test_argmax(data, skipna):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.argmax(skipna=skipna), pandas_series.argmax(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("skipna", [True, False])
def test_argmin(data, skipna):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.argmin(skipna=skipna), pandas_series.argmin(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_argsort(data):
modin_series, pandas_series = create_test_series(data)
with pytest.warns(UserWarning):
modin_result = modin_series.argsort()
df_equals(modin_result, pandas_series.argsort())
def test_asfreq():
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
series.asfreq(freq="30S")
def test_asof():
series = pd.Series(
[10, 20, 30, 40, 50],
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
series.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_astype(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.astype(str)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.astype(str)) # repr to force materialization
else:
df_equals(modin_series.astype(str), pandas_result)
try:
pandas_result = pandas_series.astype(np.int64)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.astype(np.int64)) # repr to force materialization
else:
df_equals(modin_series.astype(np.int64), pandas_result)
try:
pandas_result = pandas_series.astype(np.float64)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.astype(np.float64)) # repr to force materialization
else:
df_equals(modin_series.astype(np.float64), pandas_result)
def test_astype_categorical():
modin_df = pd.Series(["A", "A", "B", "B", "A"])
pandas_df = pandas.Series(["A", "A", "B", "B", "A"])
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtype == pandas_result.dtype
    modin_df = pd.Series([1, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2])
    pandas_df = pandas.Series([1, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2])
    modin_result = modin_df.astype("category")
    pandas_result = pandas_df.astype("category")
    df_equals(modin_result, pandas_result)
    assert modin_result.dtype == pandas_result.dtype
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(data):
modin_series, pandas_series = create_test_series(data)
df_equals(
modin_series.at[modin_series.index[0]], pandas_series.at[pandas_series.index[0]]
)
df_equals(
        modin_series.at[modin_series.index[-1]], pandas_series.at[pandas_series.index[-1]]
)
def test_at_time():
i = pd.date_range("2008-01-01", periods=1000, freq="12H")
modin_series = pd.Series(list(range(1000)), index=i)
pandas_series = pandas.Series(list(range(1000)), index=i)
df_equals(modin_series.at_time("12:00"), pandas_series.at_time("12:00"))
df_equals(modin_series.at_time("3:00"), pandas_series.at_time("3:00"))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_autocorr(data):
modin_series, _ = create_test_series(data) # noqa: F841
with pytest.warns(UserWarning):
modin_series.autocorr()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(data):
modin_series, pandas_series = create_test_series(data)
assert modin_series.axes[0].equals(pandas_series.axes[0])
assert len(modin_series.axes) == len(pandas_series.axes)
@pytest.mark.skip(reason="Using pandas Series.")
def test_between():
modin_series = create_test_series()
with pytest.raises(NotImplementedError):
modin_series.between(None, None)
def test_between_time():
i = pd.date_range("2008-01-01", periods=1000, freq="12H")
modin_series = pd.Series(list(range(1000)), index=i)
pandas_series = pandas.Series(list(range(1000)), index=i)
df_equals(
modin_series.between_time("12:00", "17:00"),
pandas_series.between_time("12:00", "17:00"),
)
df_equals(
modin_series.between_time("3:00", "8:00"),
pandas_series.between_time("3:00", "8:00"),
)
df_equals(
modin_series.between_time("3:00", "8:00", False),
pandas_series.between_time("3:00", "8:00", False),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bfill(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.bfill(), pandas_series.bfill())
# inplace
modin_series_cp = modin_series.copy()
pandas_series_cp = pandas_series.copy()
modin_series_cp.bfill(inplace=True)
pandas_series_cp.bfill(inplace=True)
df_equals(modin_series_cp, pandas_series_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(data):
modin_series, pandas_series = create_test_series(data)
with pytest.raises(ValueError):
modin_series.bool()
with pytest.raises(ValueError):
modin_series.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_clip(request, data):
modin_series, pandas_series = create_test_series(data)
if name_contains(request.node.name, numeric_dfs):
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
# test only upper scalar bound
modin_result = modin_series.clip(None, upper)
pandas_result = pandas_series.clip(None, upper)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_series.clip(lower, upper)
pandas_result = pandas_series.clip(lower, upper)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_combine(data):
modin_series, _ = create_test_series(data) # noqa: F841
modin_series2 = modin_series % (max(modin_series) // 2)
modin_series.combine(modin_series2, lambda s1, s2: s1 if s1 < s2 else s2)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_combine_first(data):
modin_series, pandas_series = create_test_series(data)
modin_series2 = modin_series % (max(modin_series) // 2)
pandas_series2 = pandas_series % (max(pandas_series) // 2)
modin_result = modin_series.combine_first(modin_series2)
pandas_result = pandas_series.combine_first(pandas_series2)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_compress(data):
modin_series, pandas_series = create_test_series(data) # noqa: F841
try:
pandas_series.compress(pandas_series > 30)
except Exception as e:
with pytest.raises(type(e)):
modin_series.compress(modin_series > 30)
else:
modin_series.compress(modin_series > 30)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_constructor(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series, pandas_series)
df_equals(pd.Series(modin_series), pandas.Series(pandas_series))
def test_constructor_columns_and_index():
modin_series = pd.Series([1, 1, 10], index=[1, 2, 3], name="health")
pandas_series = pandas.Series([1, 1, 10], index=[1, 2, 3], name="health")
df_equals(modin_series, pandas_series)
df_equals(pd.Series(modin_series), pandas.Series(pandas_series))
df_equals(
pd.Series(modin_series, name="max_speed"),
pandas.Series(pandas_series, name="max_speed"),
)
df_equals(
pd.Series(modin_series, index=[1, 2]),
| pandas.Series(pandas_series, index=[1, 2]) | pandas.Series |
# Third party modules
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def sir_step(S, I, R, beta, gamma, N):
Sn = (-beta * S * I) + S
In = (beta * S * I - gamma * I) + I
Rn = gamma * I + R
    Sn, In, Rn = (0 if x < 0 else x for x in [Sn, In, Rn])
    scale = N / (Sn + In + Rn)
    return (x * scale for x in [Sn, In, Rn])
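# A small worked example (illustrative numbers only, not from any dataset):
# one sir_step call with S=999, I=1, R=0, beta=3e-4 and gamma=1/14 moves
# roughly 0.3 people from S to I and roughly 0.07 from I to R, then rescales
# so that S + I + R stays equal to N = 1000.
def _sir_step_example():  # pragma: no cover
    S, I, R = sir_step(999.0, 1.0, 0.0, 3e-4, 1.0 / 14, 1000.0)
    return S, I, R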
def chime_sir(S, I, R, beta, gamma, n_days, beta_decay=None):
N = sum([S, I, R])
s, i, r = ([x] for x in [S, I, R])
for _ in range(n_days):
S, I, R = sir_step(S, I, R, beta, gamma, N)
beta = beta * (1 - beta_decay) if beta_decay is not None else beta
s += [S]
i += [I]
r += [R]
return (np.array(x) for x in [s, i, r])
def generate_pars(
S,
infect_0,
curr_hosp,
hosp_rate,
t_double,
contact_rate,
hosp_share,
hos_los,
icu_los,
vent_los,
R,
t_rec,
vent_rate,
icu_rate,
):
out = {}
out["S"] = S
out["infection_known"] = infect_0
out["hosp_rate"] = hosp_rate
out["vent_rate"] = vent_rate
out["icu_rate"] = icu_rate
out["hosp_los"] = hos_los
out["icu_los"] = icu_los
out["vent_los"] = vent_los
out["hosp_share"] = hosp_share
infect_total = curr_hosp / hosp_share / hosp_rate
out["I"] = infect_total
out["detect_prob"] = infect_0 / infect_total
out["R"] = R
out["growth_intrinsic"] = 2 ** (1 / t_double) - 1
out["t_rec"] = t_rec
out["gamma"] = 1 / t_rec
out["contact_rate"] = contact_rate
out["beta"] = ((out["growth_intrinsic"] + out["gamma"]) / S) * (1 - contact_rate)
out["r_t"] = out["beta"] / out["gamma"] * S
out["r_naught"] = out["r_t"] / (1 - contact_rate)
out["t_double_base"] = t_double
out["t_double_true"] = 1 / np.log2(out["beta"] * S - out["gamma"] + 1)
return out
def chime(
S,
infect_0,
curr_hosp,
hosp_rate=0.05,
t_double=6,
contact_rate=0,
hosp_share=1.0,
hos_los=7,
icu_los=9,
vent_los=10,
R=0,
t_rec=14,
beta_decay=None,
n_days=60,
vent_rate=0.01,
icu_rate=0.02,
):
"""
chime: SIR model for ventilators, ICU , and hospitilaizations.
-----------------------------
parameters:
S: population size
infect_0: number of confirmed infections
curr_hosp: number of currently hospitalized patients
hosp_rate: hospitalization_rate = 0.05
t_double: time to double without distancing = 6
contact_rate: reduction in contact due to distancing = 0
hosp_share: proportion of the population represented by hospitalization data = 1
hos_los: how long people stay in the hospital = 7
icu_los : how long people stay in the ice = 9
vent_los : how long people stay with ventilators = 10
R : number recovered present data = 0
t_rec : number of days to recover = 14
betay_decay : beta decay = None
n_days: number of days ahead to look = 60
vent_rate: rate of people who need ventilators = 0.01
icu_rate: rate of people who need icu = 0.02
"""
pars = generate_pars(
S,
infect_0,
curr_hosp,
hosp_rate,
t_double,
contact_rate,
hosp_share,
hos_los,
icu_los,
vent_los,
R,
t_rec,
vent_rate,
icu_rate,
)
s, i, r = chime_sir(
pars["S"], pars["I"], pars["R"], pars["beta"], pars["gamma"], n_days, beta_decay
)
hosp, vent, icu = (
pars[x] * i * pars["hosp_share"] for x in ["hosp_rate", "vent_rate", "icu_rate"]
)
# something is wrong in general, but will be up and running by bedtime
days = np.arange(0, n_days + 1)
data = dict(zip(["day", "hosp", "icu", "vent"], [days, hosp, icu, vent]))
proj = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
# pylint: disable=C0103, C0303
from __future__ import absolute_import
#from builtins import (bytes, str, open, super, range,
# zip, round, input, int, pow, object)
import os
import io
import csv
import gzip
import zipfile
import re
import datetime
import pytz
import arrow
import iso8601
import traceback
import shutil
import numpy as np
import pandas as pd
import codecs
from pandas.core.indexing import IndexingError
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from pandas import Series, DataFrame
from dateutil import parser
from timezonefinder import TimezoneFinder
from lxml import objectify,etree
from fitparse import FitFile
try:
from .utils import (
totimestamp, format_pace, format_time,
)
from .tcxtools import strip_control_characters
except (ValueError,ImportError): # pragma: no cover
from rowingdata.utils import (
totimestamp, format_pace, format_time,
)
from rowingdata.tcxtools import strip_control_characters
import six
from six.moves import range
from six.moves import zip
import sys
if sys.version_info[0]<=2: # pragma: no cover
pythonversion = 2
readmode = 'r'
readmodebin = 'rb'
else:
readmode = 'rt'
readmodebin = 'rt'
pythonversion = 3
from io import open
# we're going to plot SI units - convert pound force to Newton
lbstoN = 4.44822
def clean_nan(x):
for i in range(len(x) - 2):
if np.isnan(x[i + 1]):
if x[i + 2] > x[i]:
x[i + 1] = 0.5 * (x[i] + x[i + 2])
if x[i + 2] < x[i]:
x[i + 1] = 0
return x
def make_converter(convertlistbase,df):
converters = {}
for key in convertlistbase:
try:
try:
values = df[key].apply(
lambda x: float(x.replace('.', '').replace(',', '.'))
)
converters[key] = lambda x: float(x.replace('.', '').replace(',', '.'))
except AttributeError:
pass
except KeyError: # pragma: no cover
pass
return converters
def flexistrptime(inttime):
try:
t = datetime.datetime.strptime(inttime, "%H:%M:%S.%f")
except ValueError:
try:
t = datetime.datetime.strptime(inttime, "%M:%S")
except ValueError:
try:
t = datetime.datetime.strptime(inttime, "%H:%M:%S")
except ValueError:
try:
t = datetime.datetime.strptime(inttime, "%M:%S.%f")
except ValueError: # pragma: no cover
t = datetime.datetime.utcnow()
return t
def flexistrftime(t):
h = t.hour
m = t.minute
s = t.second
us = t.microsecond
second = s + us / 1.e6
m = m + 60 * h
string = "{m:0>2}:{s:0>4.1f}".format(
m=m,
s=second
)
return string
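# A minimal round-trip sketch (hypothetical value): flexistrptime accepts
# "H:M:S", "M:S", and fractional-second variants, and flexistrftime renders
# the result back as minutes and tenths of seconds.
def _flexitime_example():  # pragma: no cover
    t = flexistrptime("1:23.4")   # parsed as 1 minute 23.4 seconds
    return flexistrftime(t)       # -> "01:23.4"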
def csvtests(s):
# get first and 7th line of file
try:
firstline = s[0]
except IndexError: # pragma: no cover
firstline = ''
try:
secondline = s[1]
except IndexError: # pragma: no cover
secondline = ''
try:
thirdline = s[2]
except IndexError: # pragma: no cover
thirdline = ''
try:
fourthline = s[3]
except IndexError: # pragma: no cover
fourthline = ''
try:
seventhline = s[6]
except IndexError: # pragma: no cover
seventhline = ''
try:
ninthline = s[8]
except IndexError: # pragma: no cover
ninthline = ''
if 'Potential Split' in firstline:
return 'hero'
if 'timestamp' in firstline and 'InstaSpeed' in firstline:
return 'nklinklogbook'
if 'RitmoTime' in firstline:
return 'ritmotime'
if 'Quiske' in firstline:
return 'quiske'
if 'RowDate' in firstline: # pragma: no cover
return 'rowprolog'
if 'Workout Name' in firstline:
return 'c2log'
if 'Concept2 Utility' in firstline: # pragma: no cover
return 'c2log'
if 'Concept2' in firstline: # pragma: no cover
return 'c2log'
if 'Workout #' in firstline: # pragma: no cover
return 'c2log'
if 'Activity Type' in firstline and 'Date' in firstline: # pragma: no cover
return 'c2log'
if 'Avg Watts' in firstline: # pragma: no cover
return 'c2log'
if 'Avg Speed (IMP)' in firstline:
return 'speedcoach2'
if 'LiNK' in ninthline:
return 'speedcoach2'
if 'SpeedCoach GPS Pro' in fourthline: # pragma: no cover
return 'speedcoach2'
if 'SpeedCoach GPS' in fourthline: # pragma: no cover
return 'speedcoach2'
if 'SpeedCoach GPS2' in fourthline: # pragma: no cover
return 'speedcoach2'
if 'SpeedCoach GPS Pro' in thirdline: # pragma: no cover
return 'speedcoach2'
if 'Practice Elapsed Time (s)' in firstline:
return 'mystery'
if 'Mike' in firstline and 'process' in firstline: # pragma: no cover
return 'bcmike'
if 'Club' in firstline and 'workoutType' in secondline:
return 'boatcoach'
if 'stroke500MPace' in firstline:
return 'boatcoach'
if 'Club' in secondline and 'Piece Stroke Count' in thirdline:
return 'boatcoachotw'
if 'Club' in firstline and 'Piece Stroke Count' in secondline: # pragma: no cover
return 'boatcoachotw'
if 'peak_force_pos' in firstline:
return 'rowperfect3'
if 'Hair' in seventhline:
return 'rp'
if 'smo2' in thirdline: # pragma: no cover
return 'humon'
if 'Total elapsed time (s)' in firstline:
return 'ergstick'
if 'Total elapsed time' in firstline:
return 'ergstick'
if 'Stroke Number' and 'Time (seconds)' in firstline:
return 'ergdata'
if 'Number' in firstline and 'Cal/Hr' in firstline: # pragma: no cover
return 'ergdata'
if ' DriveTime (ms)' in firstline:
return 'csv'
if 'ElapsedTime (sec)' in firstline: # pragma: no cover
return 'csv'
if 'HR' in firstline and 'Interval' in firstline and 'Avg HR' not in firstline:
return 'speedcoach'
if 'stroke.REVISION' in firstline:
return 'painsleddesktop'
if 'Date' in firstline and 'Latitude' in firstline and 'Heart rate' in firstline:
return 'kinomap'
if 'Cover' in firstline:
return 'coxmate'
if '500m Split (secs)' in firstline and 'Force Curve Data Points (Newtons)' in firstline:
return 'eth' # it's unknown but it was first submitted by a student from ETH Zurich
return 'unknown' # pragma: no cover
def get_file_type(f):
filename,extension = os.path.splitext(f)
extension = extension.lower()
if extension == '.xls': # pragma: no cover
return 'xls'
if extension == '.kml': # pragma: no cover
return 'kml'
if extension == '.txt': # pragma: no cover
if os.path.basename(f)[0:3].lower() == 'att':
return 'att'
if extension in ['.jpg','.jpeg','.tiff','.png','.gif','.bmp']: # pragma: no cover
return 'imageformat'
if extension in ['.json']: # pragma: no cover
return 'json'
if extension == '.gz':
filename,extension = os.path.splitext(filename)
if extension == '.fit':
newfile = 'temp.fit'
with gzip.open(f,'rb') as fop:
with open(newfile,'wb') as f_out:
shutil.copyfileobj(fop, f_out)
try:
FitFile(newfile, check_crc=False).parse()
return 'fit'
except: # pragma: no cover
return 'unknown'
return 'fit' # pragma: no cover
if extension == '.tcx':
try:
tree = etree.parse(f)
root = tree.getroot()
return 'tcx'
except: # pragma: no cover
try:
                with open(f, 'r') as fop:
input = fop.read()
input = strip_control_characters(input)
with open('temp_xml.tcx','w') as fout:
fout.write(input)
tree = etree.parse('temp_xml.tcx')
os.remove('temp_xml.tcx')
return 'tcx'
except:
return 'unknown'
if extension == '.gpx': # pragma: no cover
try:
tree = etree.parse(f)
root = tree.getroot()
return 'gpx'
except:
return 'unknown'
with gzip.open(f, readmode) as fop:
try:
if extension == '.csv':
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
return csvtests(s)
except IOError: # pragma: no cover
return 'notgzip'
if extension == '.csv':
linecount,isbinary = get_file_linecount(f)
if linecount <= 2: # pragma: no cover
return 'nostrokes'
if isbinary: # pragma: no cover
with open(f,readmodebin) as fop:
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
else:
with open(f, readmode) as fop:
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
return csvtests(s)
if extension == '.tcx':
try:
tree = etree.parse(f)
root = tree.getroot()
return 'tcx'
except:
try:
with open(f, 'r') as fop:
input = fop.read()
input = strip_control_characters(input)
with open('temp_xml.tcx','w') as ftemp:
ftemp.write(input)
tree = etree.parse('temp_xml.tcx')
os.remove('temp_xml.tcx')
return 'tcx'
except:
return 'unknown'
if extension == '.gpx': # pragma: no cover
try:
tree = etree.parse(f)
root = tree.getroot()
return 'gpx'
except:
return 'unknown'
if extension == '.fit':
try:
FitFile(f, check_crc=False).parse()
except: # pragma: no cover
return 'unknown'
return 'fit'
if extension == '.zip': # pragma: no cover
try:
z = zipfile.ZipFile(f)
f2 = z.extract(z.namelist()[0])
tp = get_file_type(f2)
os.remove(f2)
return 'zip', f2, tp
except:
return 'unknown'
return 'unknown'
def get_file_linecount(f):
# extension = f[-3:].lower()
extension = os.path.splitext(f)[1].lower()
isbinary = False
if extension == '.gz': # pragma: no cover
with gzip.open(f,'rb') as fop:
count = sum(1 for line in fop if line.rstrip('\n'))
if count <= 2:
# test for \r
with gzip.open(f,readmodebin) as fop:
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
count = len(s)
else:
with open(f, 'r') as fop:
try:
count = sum(1 for line in fop if line.rstrip('\n'))
except: # pragma: no cover
return 0,False
if count <= 2: # pragma: no cover
# test for \r
with open(f,readmodebin) as fop:
isbinary = True
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
count = len(s)
return count,isbinary
def get_file_line(linenr, f, isbinary=False):
line = ''
extension = os.path.splitext(f)[1].lower()
# extension = f[-3:].lower()
if extension == '.gz':
with gzip.open(f, readmode) as fop:
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
else:
with open(f, readmodebin) as fop:
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
return s[linenr-1]
def get_separator(linenr, f):
line = ''
extension = os.path.splitext(f)[1].lower()
# extension = f[-3:].lower()
if extension == '.gz':
with gzip.open(f, readmode) as fop:
for i in range(linenr):
line = fop.readline()
sep = ','
sniffer = csv.Sniffer()
sep = sniffer.sniff(line).delimiter
else:
with open(f, 'r') as fop:
for i in range(linenr):
line = fop.readline()
sep = ','
sniffer = csv.Sniffer()
sep = sniffer.sniff(line).delimiter
return sep
def empower_bug_correction(oarlength,inboard,a,b):
f = (oarlength-inboard-b)/(oarlength-inboard-a)
return f
def getoarlength(line):
l = float(line.split(',')[-1])
return l
def getinboard(line):
inboard = float(line.split(',')[-1])
return inboard
def getfirmware(line):
l = line.lower().split(',')
try:
firmware = l[l.index("firmware version:")+1]
except ValueError: # pragma: no cover
firmware = ''
return firmware
def get_empower_rigging(f):
oarlength = 289.
inboard = 88.
line = '1'
try:
with open(f, readmode) as fop:
for line in fop:
if 'Oar Length' in line:
try:
oarlength = getoarlength(line)
except ValueError:
return None,None
if 'Inboard' in line:
try:
inboard = getinboard(line)
except ValueError: # pragma: no cover
return None,None
except (UnicodeDecodeError,ValueError): # pragma: no cover
with gzip.open(f, readmode) as fop:
for line in fop:
if 'Oar Length' in line:
try:
oarlength = getoarlength(line)
except ValueError:
return None,None
if 'Inboard' in line: # pragma: no cover
try:
inboard = getinboard(line)
except ValueError:
return None,None
return oarlength / 100., inboard / 100.
def get_empower_firmware(f):
firmware = ''
try:
with open(f,readmode) as fop:
for line in fop:
if 'firmware' in line.lower() and 'oar' in line.lower():
firmware = getfirmware(line)
except (IndexError,UnicodeDecodeError):
with gzip.open(f,readmode) as fop:
for line in fop:
if 'firmware' in line.lower() and 'oar' in line.lower():
firmware = getfirmware(line)
try:
        firmware = float(firmware)
except ValueError:
firmware = None
return firmware
def skip_variable_footer(f):
counter = 0
counter2 = 0
extension = os.path.splitext(f)[1].lower()
# extension = f[-3:].lower()
if extension == '.gz':
fop = gzip.open(f,readmode)
else:
fop = open(f, 'r')
for line in fop:
if line.startswith('Type') and counter > 15:
counter2 = counter
counter += 1
else:
counter += 1
fop.close()
return counter - counter2 + 1
def get_rowpro_footer(f, converters={}):
counter = 0
counter2 = 0
extension = os.path.splitext(f)[1].lower()
# extension = f[-3:].lower()
if extension == '.gz':
fop = gzip.open(f,readmode)
else:
fop = open(f, 'r')
for line in fop:
if line.startswith('Type') and counter > 15:
counter2 = counter
counter += 1
else:
counter += 1
fop.close()
return pd.read_csv(f, skiprows=counter2,
converters=converters,
engine='python',
sep=None, index_col=False)
def skip_variable_header(f):
counter = 0
counter2 = 0
sessionc = -2
summaryc = -2
extension = os.path.splitext(f)[1].lower()
# extension = f[-3:].lower()
firmware = ''
if extension == '.gz':
with gzip.open(f,readmode) as fop:
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
else:
with open(f,readmodebin) as fop:
s = fop.read().replace('\r\n','\n').replace('\r','\n').split('\n')
summaryfound = False
for line in s:
if line.startswith('Session Summary'):
sessionc = counter
summaryfound = True
if line.startswith('Interval Summaries'):
summaryc = counter
if 'firmware' in line.lower() and 'oar' in line.lower():
firmware = getfirmware(line)
if line.startswith('Session Detail Data') or line.startswith('Per-Stroke Data'):
counter2 = counter
else:
counter += 1
# test for blank line
l = s[counter2+1]
# l = get_file_line(counter2 + 2, f)
if 'Interval' in l:
counter2 = counter2 - 1
summaryc = summaryc - 1
blanklines = 0
else:
blanklines = 1
return counter2 + 2, summaryc + 2, blanklines, sessionc + 2
def ritmo_variable_header(f):
counter = 0
extension = os.path.splitext(f)[1].lower()
# extension = f[-3:].lower()
if extension == '.gz': # pragma: no cover
fop = gzip.open(f,readmode)
else:
fop = open(f, 'r')
for line in fop:
if line.startswith('#'):
counter += 1
else:
fop.close()
return counter
return counter # pragma: no cover
def bc_variable_header(f):
counter = 0
extension = os.path.splitext(f)[1].lower()
# extension = f[-3:].lower()
if extension == '.gz':
fop = gzip.open(f,readmode)
else:
fop = open(f, 'r')
for line in fop:
if line.startswith('Workout'):
fop.close()
return counter+1
else:
counter += 1
fop.close() # pragma: no cover
return 0 # pragma: no cover
def make_cumvalues_array(xvalues,doequal=False):
""" Takes a Pandas dataframe with one column as input value.
Tries to create a cumulative series.
"""
try:
newvalues = 0.0 * xvalues
except TypeError: # pragma: no cover
return [xvalues,0]
dx = np.diff(xvalues)
dxpos = dx
nrsteps = len(dxpos[dxpos < 0])
lapidx = np.append(0, np.cumsum((-dx + abs(dx)) / (-2 * dx)))
if doequal:
lapidx[0] = 0
cntr = 0
for i in range(len(dx)-1):
if dx[i+1] <= 0:
cntr += 1
lapidx[i+1] = cntr
if nrsteps > 0:
indexes = np.where(dxpos < 0)
for index in indexes:
dxpos[index] = xvalues[index + 1]
newvalues = np.append(0, np.cumsum(dxpos)) + xvalues[0]
else:
newvalues = xvalues
return [newvalues, abs(lapidx)]
def make_cumvalues(xvalues):
""" Takes a Pandas dataframe with one column as input value.
Tries to create a cumulative series.
"""
newvalues = 0.0 * xvalues
dx = xvalues.diff()
dxpos = dx
mask = -xvalues.diff() > 0.9 * xvalues
nrsteps = len(dx.loc[mask])
lapidx = np.cumsum((-dx + abs(dx)) / (-2 * dx))
lapidx = lapidx.fillna(value=0)
test = len(lapidx.loc[lapidx.diff() < 0])
if test != 0:
lapidx = np.cumsum((-dx + abs(dx)) / (-2 * dx))
lapidx = lapidx.fillna(method='ffill')
lapidx.loc[0] = 0
if nrsteps > 0:
dxpos[mask] = xvalues[mask]
newvalues = np.cumsum(dxpos) + xvalues.iloc[0]
newvalues.iloc[0] = xvalues.iloc[0]
else:
newvalues = xvalues
newvalues = newvalues.replace([-np.inf, np.inf], np.nan)
newvalues.fillna(method='ffill', inplace=True)
newvalues.fillna(method='bfill', inplace=True)
lapidx.fillna(method='bfill', inplace=True)
return [newvalues, lapidx]
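# A minimal sketch with made-up numbers of what make_cumvalues does: a
# distance column that resets to zero at every interval is rebuilt into a
# monotonic series, and the second return value numbers the laps.
def _cumvalues_example():  # pragma: no cover
    meters = pd.Series([0.0, 100.0, 200.0, 0.0, 100.0, 200.0])
    cumdist, lapidx = make_cumvalues(meters)
    # cumdist -> 0, 100, 200, 200, 300, 400 and lapidx -> 0, 0, 0, 1, 1, 1
    return cumdist, lapidx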
def timestrtosecs(string):
dt = parser.parse(string, fuzzy=True)
secs = 3600 * dt.hour + 60 * dt.minute + dt.second
return secs
def speedtopace(v, unit='ms'):
if unit == 'kmh':
v = v * 1000 / 3600.
if v > 0:
p = 500. / v
else:
p = np.nan
return p
def timestrtosecs2(timestring, unknown=0):
try:
h, m, s = timestring.split(':')
sval = 3600 * int(h) + 60. * int(m) + float(s)
except ValueError:
try:
m, s = timestring.split(':')
sval = 60. * int(m) + float(s)
except ValueError:
sval = unknown
return sval
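# A tiny illustration (made-up strings): timestrtosecs2 accepts both
# "h:m:s" and "m:s" formats and falls back to `unknown` when parsing fails.
def _timestrtosecs2_example():  # pragma: no cover
    return (
        timestrtosecs2("1:02:03"),     # -> 3723.0
        timestrtosecs2("2:05.5"),      # -> 125.5
        timestrtosecs2("garbled", 0),  # -> 0
    )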
def getcol(df, column='TimeStamp (sec)'):
if column:
try:
return df[column]
except KeyError:
pass
l = len(df.index)
return Series(np.zeros(l))
class CSVParser(object):
""" Parser for reading CSV files created by Painsled
"""
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else:
csvfile = kwargs.pop('csvfile', 'test.csv')
skiprows = kwargs.pop('skiprows', 0)
usecols = kwargs.pop('usecols', None)
sep = kwargs.pop('sep', ',')
engine = kwargs.pop('engine', 'c')
skipfooter = kwargs.pop('skipfooter', 0)
converters = kwargs.pop('converters', None)
self.csvfile = csvfile
if engine == 'python':
self.df = pd.read_csv(
csvfile, skiprows=skiprows, usecols=usecols,
sep=sep, engine=engine, skipfooter=skipfooter,
converters=converters, index_col=False,
compression='infer',
)
else:
self.df = pd.read_csv(
csvfile, skiprows=skiprows, usecols=usecols,
sep=sep, engine=engine, skipfooter=skipfooter,
converters=converters, index_col=False,
compression='infer',
#error_bad_lines = False
)
self.df = self.df.fillna(method='ffill')
self.defaultcolumnnames = [
'TimeStamp (sec)',
' Horizontal (meters)',
' Cadence (stokes/min)',
' HRCur (bpm)',
' Stroke500mPace (sec/500m)',
' Power (watts)',
' DriveLength (meters)',
' StrokeDistance (meters)',
' DriveTime (ms)',
' DragFactor',
' StrokeRecoveryTime (ms)',
' AverageDriveForce (lbs)',
' PeakDriveForce (lbs)',
' lapIdx',
' ElapsedTime (sec)',
' latitude',
' longitude',
]
try:
x = self.df['TimeStamp (sec)']
except KeyError:
cols = self.df.columns
for col in cols:
if 'TimeStamp ' in col: # pragma: no cover
self.df['TimeStamp (sec)'] = self.df[col]
self.columns = {c: c for c in self.defaultcolumnnames}
def to_standard(self):
inverted = {value: key for key, value in six.iteritems(self.columns)}
self.df.rename(columns=inverted, inplace=True)
self.columns = {c: c for c in self.defaultcolumnnames}
def time_values(self, *args, **kwargs):
timecolumn = kwargs.pop('timecolumn', 'TimeStamp (sec)')
unixtimes = self.df[timecolumn]
return unixtimes
def write_csv(self, *args, **kwargs):
if self.df.empty: # pragma: no cover
return None
isgzip = kwargs.pop('gzip', False)
writeFile = args[0]
# defaultmapping ={c:c for c in self.defaultcolumnnames}
self.columns = kwargs.pop('columns', self.columns)
unixtimes = self.time_values(
timecolumn=self.columns['TimeStamp (sec)'])
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.df[
self.columns[' ElapsedTime (sec)']
] = unixtimes - unixtimes.iloc[0]
# Default calculations
pace = self.df[
self.columns[' Stroke500mPace (sec/500m)']].replace(0, 300)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
datadict = {name: getcol(self.df, self.columns[name])
for name in self.columns}
# Create data frame with all necessary data to write to csv
data = DataFrame(datadict)
data = data.sort_values(by='TimeStamp (sec)', ascending=True)
data = data.fillna(method='ffill')
# drop all-zero columns
for c in data.columns:
if (data[c] == 0).any() and data[c].mean() == 0:
data = data.drop(c, axis=1)
if isgzip: # pragma: no cover
return data.to_csv(writeFile + '.gz', index_label='index',
compression='gzip')
else:
return data.to_csv(writeFile, index_label='index')
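# A minimal usage sketch (the file names are hypothetical): every parser in
# this module follows the same pattern -- construct it with a stroke CSV file
# and call write_csv() to emit a CSV with the standardized column names
# defined in defaultcolumnnames.
def _csvparser_example():  # pragma: no cover
    base = CSVParser(csvfile='example_strokes.csv')
    base.write_csv('example_standard.csv')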
# Parsing ETH files
class ETHParser(CSVParser):
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else: # pragma: no cover
csvfile = kwargs['csvfile']
super(ETHParser, self).__init__(*args, **kwargs)
self.cols = [
'',
'Distance (meters)',
'Stroke Rate (s/m)',
'Heart Rate (bpm)',
'500m Split (secs)',
'Power (Watts)',
'Drive Length (meters)',
'',
'Drive Time (secs)',
'',
'',
'Average Drive Force (Newtons)',
'Peak Force (Newtons)',
'',
'Time (secs)',
'',
'',
]
self.cols = [b if a == '' else a
for a,b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
self.df[self.columns[' DriveTime (ms)']] *= 1000.
startdatetime = datetime.datetime.utcnow()
elapsed = self.df[self.columns[' ElapsedTime (sec)']]
starttimeunix = arrow.get(startdatetime).timestamp()
unixtimes = starttimeunix+elapsed
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes-starttimeunix
self.to_standard()
# Parsing CSV files from Humon
class HumonParser(CSVParser): # pragma: no cover
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else:
csvfile = kwargs['csvfile']
skiprows = 2
kwargs['skiprows'] = skiprows
super(HumonParser, self).__init__(*args, **kwargs)
self.cols = [
'Time [seconds]',
'distance [meters]',
'',
'heartRate [bpm]',
'speed [meters/sec]',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'Time [seconds]',
'latitude [degrees]',
'longitude [degrees]',
]
self.cols = [b if a == '' else a
for a,b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
velo = self.df[self.columns[' Stroke500mPace (sec/500m)']]
pace = 500./velo
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
# get date from header
dateline = get_file_line(2,csvfile)
row_datetime = parser.parse(dateline, fuzzy=True,yearfirst=True,dayfirst=False)
timestamp = arrow.get(row_datetime).timestamp()
time = self.df[self.columns['TimeStamp (sec)']]
time += timestamp
self.df[self.columns['TimeStamp (sec)']] = time
self.to_standard()
class RitmoTimeParser(CSVParser):
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else: # pragma: no cover
csvfile = kwargs['csvfile']
skiprows = ritmo_variable_header(csvfile)
kwargs['skiprows'] = skiprows
separator = get_separator(skiprows+2, csvfile)
kwargs['sep'] = separator
super(RitmoTimeParser, self).__init__(*args, **kwargs)
# crude EU format detector
try:
ll = self.df['Longitude (deg)']*10.0
except TypeError: # pragma: no cover
convertlistbase = [
'Total Time (sec)',
'Rate (spm)',
'Distance (m)',
'Speed (m/s)',
'Latitude (deg)',
'Longitude (deg)',
]
converters = make_converter(convertlistbase,self.df)
kwargs['converters'] = converters
super(RitmoTimeParser, self).__init__(*args, **kwargs)
self.cols = [
'',
'Distance (m)',
'Rate (spm)',
'Heart Rate (bpm)',
'Split (/500m)',
'',
'',
'',
'',
'',
'',
'',
'',
'Piece#',
'Total Time (sec)',
'Latitude (deg)',
'Longitude (deg)',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations / speed
velo = self.df['Speed (m/s)']
pace = 500. / velo
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
# add rest from state column
self.df[' WorkoutState'] = self.df['State'].apply(lambda x: 3 if x.lower()=='rest' else 4)
# try date from first line
firstline = get_file_line(1, csvfile)
try:
startdatetime = parser.parse(firstline,fuzzy=True)
except ValueError: # pragma: no cover
startdatetime = datetime.datetime.utcnow()
if startdatetime.tzinfo is None:
try:
latavg = self.df[self.columns[' latitude']].mean()
lonavg = self.df[self.columns[' longitude']].mean()
tf = TimezoneFinder()
timezone_str = tf.timezone_at(lng=lonavg, lat=latavg)
if timezone_str is None: # pragma: no cover
timezone_str = tf.closest_timezone_at(lng=lonavg,lat=latavg)
startdatetime = pytz.timezone(timezone_str).localize(startdatetime)
except KeyError: # pragma: no cover
startdatetime = pytz.timezone('UTC').localize(startdatetime)
timezonestr = 'UTC'
elapsed = self.df[self.columns[' ElapsedTime (sec)']]
starttimeunix = arrow.get(startdatetime).timestamp()
#tts = startdatetime + elapsed.apply(lambda x: datetime.timedelta(seconds=x))
#unixtimes=tts.apply(lambda x: time.mktime(x.utctimetuple()))
#unixtimes = tts.apply(lambda x: arrow.get(
# x).timestamp() + arrow.get(x).microsecond / 1.e6)
unixtimes = starttimeunix+elapsed
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.to_standard()
class QuiskeParser(CSVParser):
def __init__(self, *args, **kwargs):
kwargs['skiprows'] = 1
if args:
csvfile = args[0]
else: # pragma: no cover
csvfile = kwargs['csvfile']
super(QuiskeParser, self).__init__(*args, **kwargs)
self.cols = [
'timestamp(s)',
'distance(m)',
'SPM (strokes per minute)',
'',
' Stroke500mPace (sec/500m)',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'latitude',
'longitude',
'Catch',
'Finish',
]
self.defaultcolumnnames += [
'catch',
'finish',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
velo = self.df['speed (m/s)']
pace = 500./velo
pace = pace.replace(np.nan, 300)
pace = pace.replace(np.inf, 300)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
unixtimes = self.df[self.columns['TimeStamp (sec)']]
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes.iloc[0]
self.df[self.columns['catch']] = 0
self.df[self.columns['finish']] = self.df['stroke angle (deg)']
self.to_standard()
class BoatCoachOTWParser(CSVParser):
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else: # pragma: no cover
csvfile = kwargs['csvfile']
skiprows = bc_variable_header(csvfile)
kwargs['skiprows'] = skiprows
separator = get_separator(3, csvfile)
kwargs['sep'] = separator
super(BoatCoachOTWParser, self).__init__(*args, **kwargs)
# 500m or km based
try:
pace = self.df['Last 10 Stroke Speed(/500m)']
except KeyError: # pragma: no cover
pace1 = self.df['Last 10 Stroke Speed(/km)']
self.df['Last 10 Stroke Speed(/500m)'] = pace1.values
# crude EU format detector
try:
ll = self.df['Longitude']*10.0
except TypeError: # pragma: no cover
convertlistbase = [
'TOTAL Distance Since Start BoatCoach(m)',
'Stroke Rate',
'Heart Rate',
'Latitude',
'Longitude',
]
converters = make_converter(convertlistbase,self.df)
kwargs['converters'] = converters
super(BoatCoachOTWParser, self).__init__(*args, **kwargs)
self.cols = [
'DateTime',
'TOTAL Distance Since Start BoatCoach(m)',
'Stroke Rate',
'Heart Rate',
'Last 10 Stroke Speed(/500m)',
'',
'',
'',
'',
'',
'',
'',
'',
'Piece Number',
'Elapsed Time',
'Latitude',
'Longitude',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
try:
row_datetime = self.df[self.columns['TimeStamp (sec)']]
row_date = parser.parse(row_datetime[0], fuzzy=True,yearfirst=True,dayfirst=False)
row_datetime = row_datetime.apply(
lambda x: parser.parse(x, fuzzy=True,yearfirst=True,dayfirst=False))
unixtimes = row_datetime.apply(lambda x: arrow.get(
x).timestamp() + arrow.get(x).microsecond / 1.e6)
except KeyError: # pragma: no cover
row_date2 = arrow.get(row_date).timestamp()
timecolumn = self.df[self.columns[' ElapsedTime (sec)']]
timesecs = timecolumn.apply(timestrtosecs)
timesecs = make_cumvalues(timesecs)[0]
unixtimes = row_date2 + timesecs
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes[0]
try: # pragma: no cover
d = self.df['Last 10 Stroke Speed(/km)']
multiplicator = 0.5
        except KeyError:
multiplicator = 1
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']].apply(
timestrtosecs2
)
pace *= multiplicator
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
self.to_standard()
class CoxMateParser(CSVParser):
def __init__(self, *args, **kwargs):
super(CoxMateParser, self).__init__(*args, **kwargs)
# remove "00 waiting to row"
self.cols = [
'',
'Distance',
'Rating',
'Heart Rate',
'',
'',
'',
'Cover',
'',
'',
'',
'',
'',
'',
'Time',
'',
'',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations / speed
dd = self.df[self.columns[' Horizontal (meters)']].diff()
dt = self.df[self.columns[' ElapsedTime (sec)']].diff()
velo = dd / dt
pace = 500. / velo
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
# calculations / time stamp
# convert to unix style time stamp
now = datetime.datetime.utcnow()
elapsed = self.df[self.columns[' ElapsedTime (sec)']]
tts = now + elapsed.apply(lambda x: datetime.timedelta(seconds=x))
#unixtimes=tts.apply(lambda x: time.mktime(x.utctimetuple()))
unixtimes = tts.apply(lambda x: arrow.get(
x).timestamp() + arrow.get(x).microsecond / 1.e6)
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.to_standard()
class painsledDesktopParser(CSVParser):
def __init__(self, *args, **kwargs):
super(painsledDesktopParser, self).__init__(*args, **kwargs)
# remove "00 waiting to row"
self.df = self.df[self.df[' stroke.endWorkoutState']
!= ' "00 waiting to row"']
self.cols = [
' stroke.driveStartMs',
' stroke.startWorkoutMeter',
' stroke.strokesPerMin',
' stroke.hrBpm',
' stroke.paceSecPer1k',
' stroke.watts',
' stroke.driveMeters',
' stroke.strokeMeters',
' stroke.driveMs',
' stroke.dragFactor',
' stroke.slideMs',
'',
'',
' stroke.intervalNumber',
' stroke.driveStartMs',
' latitude',
' longitude',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']] / 2.
pace = np.clip(pace, 0, 1e4)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
timestamps = self.df[self.columns['TimeStamp (sec)']]
# convert to unix style time stamp
tts = timestamps.apply(lambda x: iso8601.parse_date(x[2:-1]))
#unixtimes=tts.apply(lambda x: time.mktime(x.utctimetuple()))
        unixtimes = tts.apply(lambda x: arrow.get(
            x).timestamp() + arrow.get(x).microsecond / 1.e6)
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[
self.columns[' ElapsedTime (sec)']
] = unixtimes - unixtimes.iloc[0]
self.to_standard()
class BoatCoachParser(CSVParser):
def __init__(self, *args, **kwargs):
kwargs['skiprows'] = 1
kwargs['usecols'] = list(range(25))
if args:
csvfile = args[0]
else:
csvfile = kwargs['csvfile']
ll = get_file_line(1,csvfile)
if 'workoutType' in ll:
kwargs['skiprows'] = 0
separator = get_separator(2, csvfile)
kwargs['sep'] = separator
super(BoatCoachParser, self).__init__(*args, **kwargs)
# crude EU format detector
try:
p = self.df['stroke500MPace'] * 500.
except TypeError:
convertlistbase = ['workDistance',
'strokeRate',
'currentHeartRate',
'strokePower',
'strokeLength',
'strokeDriveTime',
'dragFactor',
'strokeAverageForce',
'strokePeakForce',
'intervalCount']
converters = make_converter(convertlistbase,self.df)
kwargs['converters'] = converters
super(BoatCoachParser, self).__init__(*args, **kwargs)
self.cols = [
'DateTime',
'workDistance',
'strokeRate',
'currentHeartRate',
'stroke500MPace',
'strokePower',
'strokeLength',
'',
'strokeDriveTime',
'dragFactor',
' StrokeRecoveryTime (ms)',
'strokeAverageForce',
'strokePeakForce',
'intervalCount',
'workTime',
' latitude',
' longitude',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# get date from footer
try:
try:
with open(csvfile, readmode) as fop:
line = fop.readline()
dated = re.split('Date:', line)[1][1:-1]
except (IndexError,UnicodeDecodeError):
with gzip.open(csvfile,readmode) as fop:
line = fop.readline()
dated = re.split('Date:', line)[1][1:-1]
row_date = parser.parse(dated, fuzzy=True,yearfirst=True,dayfirst=False)
except IOError:
pass
try:
            datetimestrings = self.df[self.columns['TimeStamp (sec)']]
            row_date = parser.parse(datetimestrings[0], fuzzy=True, yearfirst=True, dayfirst=False)
            row_datetime = datetimestrings.apply(
                lambda x: parser.parse(x, fuzzy=True, yearfirst=True, dayfirst=False))
unixtimes = row_datetime.apply(
lambda x: arrow.get(x).timestamp() + arrow.get(x).microsecond / 1.e6
)
except KeyError:
# calculations
# row_date2=time.mktime(row_date.utctimetuple())
row_date2 = arrow.get(row_date).timestamp()
timecolumn = self.df[self.columns[' ElapsedTime (sec)']]
timesecs = timecolumn.apply(timestrtosecs)
timesecs = make_cumvalues(timesecs)[0]
unixtimes = row_date2 + timesecs
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes[0]
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']].apply(
timestrtosecs)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
self.df[self.columns[' DriveTime (ms)']] = 1.0e3 * \
self.df[self.columns[' DriveTime (ms)']]
drivetime = self.df[self.columns[' DriveTime (ms)']]
stroketime = 60. * 1000. / \
(1.0 * self.df[self.columns[' Cadence (stokes/min)']])
recoverytime = stroketime - drivetime
        recoverytime = recoverytime.replace(np.inf, np.nan)
        recoverytime = recoverytime.replace(-np.inf, np.nan)
        recoverytime = recoverytime.fillna(method='bfill')
self.df[self.columns[' StrokeRecoveryTime (ms)']] = recoverytime
# Reset Interval Count by StrokeCount
res = make_cumvalues(self.df['strokeCount'])
lapidx = res[1]
strokecount = res[0]
self.df['strokeCount'] = strokecount
if lapidx.max() > 1:
self.df[self.columns[' lapIdx']] = lapidx
# Recalculate power
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']]
pace = np.clip(pace, 0, 1e4)
pace = pace.replace(0, 300)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
velocity = 500. / (1.0 * pace)
power = 2.8 * velocity**3
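        # P = 2.8 * v^3 is the standard Concept2 pace-to-power relation (v in m/s)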
dif = abs(power - self.df[self.columns[' Power (watts)']])
moving = self.df[self.columns[' Horizontal (meters)']].diff()
moving = moving.apply(lambda x:abs(x))
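        # keep the recorded power where it is close to (or differs wildly from) the
        # pace-based estimate, and zero the power where the boat barely moves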
power[dif < 5] = self.df[self.columns[' Power (watts)']][dif < 5]
power[dif > 1000] = self.df[self.columns[' Power (watts)']][dif > 1000]
power[moving <= 1] = 0
self.df[self.columns[' Power (watts)']] = power
# Calculate Stroke Rate during rest
mask = (self.df['intervalType'] == 'Rest')
for strokenr in self.df.loc[mask, 'strokeCount'].unique():
mask2 = self.df['strokeCount'] == strokenr
strokes = self.df.loc[mask2, 'strokeCount']
timestamps = self.df.loc[mask2, self.columns['TimeStamp (sec)']]
strokeduration = len(strokes) * timestamps.diff().mean()
spm = 60. / strokeduration
self.df.loc[mask2, self.columns[' Cadence (stokes/min)']] = spm
# get stroke power
data = []
try:
with gzip.open(csvfile,readmode) as f:
for line in f:
s = line.split(',')
data.append(','.join([str(x) for x in s[26:-1]]))
except IOError:
with open(csvfile,'r') as f:
for line in f:
s = line.split(',')
data.append(','.join([str(x) for x in s[26:-1]]))
try:
self.df['PowerCurve'] = data[2:]
except ValueError:
pass
# dump empty lines at end
endhorizontal = self.df.loc[self.df.index[-1],
self.columns[' Horizontal (meters)']]
if endhorizontal == 0:
self.df.drop(self.df.index[-1], inplace=True)
res = make_cumvalues(self.df[self.columns[' Horizontal (meters)']])
self.df['cumdist'] = res[0]
maxdist = self.df['cumdist'].max()
mask = (self.df['cumdist'] == maxdist)
while len(self.df.loc[mask]) > 2:
mask = (self.df['cumdist'] == maxdist)
self.df.drop(self.df.index[-1], inplace=True)
mask = (self.df['cumdist'] == maxdist)
try:
self.df.loc[
mask,
self.columns[' lapIdx']
] = self.df.loc[self.df.index[-3], self.columns[' lapIdx']]
except IndexError: # pragma: no cover
pass
self.to_standard()
class KinoMapParser(CSVParser):
def __init__(self, *args, **kwargs):
kwargs['skiprows'] = 0
#kwargs['usecols'] = range(25)
if args:
csvfile = args[0]
else: # pragma: no cover
csvfile = kwargs['csvfile']
super(KinoMapParser, self).__init__(*args, **kwargs)
self.cols = [
'Date',
'Distance',
'Cadence',
'Heart rate',
'Speed',
'Power',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'Latitude',
'Longitude',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
row_datetime = self.df[self.columns['TimeStamp (sec)']]
row_datetime = row_datetime.apply(
lambda x: parser.parse(x, fuzzy=True,yearfirst=True,dayfirst=False))
#unixtimes=datetime.apply(lambda x: time.mktime(x.utctimetuple()))
unixtimes = row_datetime.apply(lambda x: arrow.get(
x).timestamp() + arrow.get(x).microsecond / 1.e6)
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes[0]
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']].apply(
lambda x: speedtopace(x, unit='kmh'))
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
res = make_cumvalues(self.df[self.columns[' Horizontal (meters)']])
self.df['cumdist'] = res[0]
maxdist = self.df['cumdist'].max()
mask = (self.df['cumdist'] == maxdist)
while len(self.df[mask]) > 2: # pragma: no cover
mask = (self.df['cumdist'] == maxdist)
self.df.drop(self.df.index[-1], inplace=True)
mask = (self.df['cumdist'] == maxdist)
self.to_standard()
class BoatCoachAdvancedParser(CSVParser):
def __init__(self, *args, **kwargs): # pragma: no cover
kwargs['skiprows'] = 1
kwargs['usecols'] = list(range(25))
if args:
csvfile = args[0]
else:
csvfile = kwargs['csvfile']
separator = get_separator(2, csvfile)
kwargs['sep'] = separator
super(BoatCoachAdvancedParser, self).__init__(*args, **kwargs)
# crude EU format detector
try:
p = self.df['stroke500MPace'] * 500.
except TypeError:
convertlistbase = [
'workDistance',
'strokeRate',
'currentHeartRate',
'strokePower',
'strokeLength',
'strokeDriveTime',
'dragFactor',
'strokeAverageForce',
'strokePeakForce',
'intervalCount',
]
converters = make_converter(convertlistbase,self.df)
kwargs['converters'] = converters
super(BoatCoachParser, self).__init__(*args, **kwargs)
self.cols = [
'DateTime',
'workDistance',
'strokeRate',
'currentHeartRate',
'stroke500MPace',
'strokePower',
'strokeLength',
'',
'strokeDriveTime',
'dragFactor',
' StrokeRecoveryTime (ms)',
'strokeAverageForce',
'strokePeakForce',
'intervalCount',
'workTime',
' latitude',
' longitude',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# get date from footer
try:
with open(csvfile, 'r') as fop:
line = fop.readline()
dated = re.split('Date:', line)[1][1:-1]
except (IndexError,UnicodeDecodeError):
with gzip.open(csvfile,readmode) as fop:
line = fop.readline()
dated = re.split('Date:', line)[1][1:-1]
row_date = parser.parse(dated, fuzzy=True,yearfirst=True,dayfirst=False)
try:
            row_datetime = self.df[self.columns['TimeStamp (sec)']]
            row_date = parser.parse(row_datetime[0], fuzzy=True, yearfirst=True, dayfirst=False)
            row_datetime = row_datetime.apply(
                lambda x: parser.parse(x, fuzzy=True, yearfirst=True, dayfirst=False))
            unixtimes = row_datetime.apply(lambda x: arrow.get(
                x).timestamp() + arrow.get(x).microsecond / 1.e6)
except KeyError:
# calculations
# row_date2=time.mktime(row_date.utctimetuple())
row_date2 = arrow.get(row_date).timestamp()
timecolumn = self.df[self.columns[' ElapsedTime (sec)']]
timesecs = timecolumn.apply(timestrtosecs)
timesecs = make_cumvalues(timesecs)[0]
unixtimes = row_date2 + timesecs
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes[0]
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']].apply(
timestrtosecs)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
self.df[self.columns[' DriveTime (ms)']] = 1.0e3 * \
self.df[self.columns[' DriveTime (ms)']]
# Calculate Recovery Time
drivetime = self.df[self.columns[' DriveTime (ms)']]
stroketime = 60. * 1000. / \
(1.0 * self.df[self.columns[' Cadence (stokes/min)']])
recoverytime = stroketime - drivetime
        recoverytime = recoverytime.replace(np.inf, np.nan)
        recoverytime = recoverytime.replace(-np.inf, np.nan)
        recoverytime = recoverytime.fillna(method='bfill')
self.df[self.columns[' StrokeRecoveryTime (ms)']] = recoverytime
# Reset Interval Count by StrokeCount
res = make_cumvalues(self.df['strokeCount'])
lapidx = res[1]
strokecount = res[0]
self.df['strokeCount'] = strokecount
if lapidx.max() > 1:
self.df[self.columns[' lapIdx']] = lapidx
lapmax = self.df[self.columns[' lapIdx']].max()
# Recalculate power
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']]
pace = np.clip(pace, 0, 1e4)
pace = pace.replace(0, 300)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
velocity = 500. / pace
power = 2.8 * velocity**3
self.df[self.columns[' Power (watts)']] = power
# Calculate Stroke Rate during rest
mask = (self.df['intervalType'] == 'Rest')
for strokenr in self.df.loc[mask, 'strokeCount'].unique():
mask2 = self.df['strokeCount'] == strokenr
strokes = self.df.loc[mask2, 'strokeCount']
timestamps = self.df.loc[mask2, self.columns['TimeStamp (sec)']]
strokeduration = len(strokes) * timestamps.diff().mean()
spm = 60. / strokeduration
self.df.loc[mask2, self.columns[' Cadence (stokes/min)']] = spm
# dump empty lines at end
endhorizontal = self.df.loc[self.df.index[-1],
self.columns[' Horizontal (meters)']]
if endhorizontal == 0:
self.df.drop(self.df.index[-1], inplace=True)
res = make_cumvalues(self.df[self.columns[' Horizontal (meters)']])
self.df['cumdist'] = res[0]
maxdist = self.df['cumdist'].max()
mask = (self.df['cumdist'] == maxdist)
while len(self.df[mask]) > 2:
mask = (self.df['cumdist'] == maxdist)
self.df.drop(self.df.index[-1], inplace=True)
mask = (self.df['cumdist'] == maxdist)
self.df.loc[
mask,
self.columns[' lapIdx']
] = self.df.loc[self.df.index[-3], self.columns[' lapIdx']]
self.to_standard()
class ErgDataParser(CSVParser):
def __init__(self, *args, **kwargs):
super(ErgDataParser, self).__init__(*args, **kwargs)
self.row_date = kwargs.pop('row_date', datetime.datetime.utcnow())
self.cols = [
'Time (seconds)',
'Distance (meters)',
'Stroke Rate',
'Heart Rate',
'Pace (seconds per 500m',
' Power (watts)',
'',
'',
'',
'',
'',
'',
'',
' lapIdx',
'Time(sec)',
' latitude',
' longitude',
]
try:
pace = self.df[self.cols[4]]
except KeyError: # pragma: no cover
self.cols[4] = 'Pace (seconds per 500m)'
try:
pace = self.df[self.cols[4]]
except KeyError:
self.cols[4] = 'Pace (seconds)'
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
# get date from footer
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']]
try:
pace = np.clip(pace, 0, 1e4)
pace = pace.replace(0, 300)
except TypeError: # pragma: no cover
pass
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
seconds = self.df[self.columns['TimeStamp (sec)']]
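        # remember the time offset of the first recorded stroke; it is re-added to ElapsedTime below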
firststrokeoffset = seconds.values[0]
res = make_cumvalues(seconds)
seconds2 = res[0] + seconds[0]
lapidx = res[1]
unixtime = seconds2 + totimestamp(self.row_date)
velocity = 500. / pace
power = 2.8 * velocity**3
self.df[self.columns['TimeStamp (sec)']] = unixtime
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtime - unixtime[0]
self.df[self.columns[' ElapsedTime (sec)']] += firststrokeoffset
self.df[self.columns[' lapIdx']] = lapidx
self.df[self.columns[' Power (watts)']] = power
self.to_standard()
class HeroParser(CSVParser):
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else: # pragma: no cover
csvfile = kwargs.pop('csvfile', None)
headerdata = get_file_line(2,csvfile).split(',')
rowdatetime = arrow.get(headerdata[0],'YYYY-MM-DD HH:mm:ss ZZ')
dragfactor = headerdata[6]
kwargs['skiprows'] = 3
super(HeroParser, self).__init__(*args, **kwargs)
self.cols = [
'Time',
'Distance',
'Stroke Rate',
'HR',
'Split',
'Watts',
'Stroke Length',
'Distance per Stroke',
'Drive Time',
'Drag Factor',
'Recovery Time',
'Average Drive Force (N)',
'Peak Drive Force (N)',
'',
'',
'',
'',
]
starttimeunix = rowdatetime.timestamp()
self.cols = [b if a == '' else a
for a,b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
self.df[self.columns[' DriveTime (ms)']] *= 1000
self.df[self.columns[' StrokeRecoveryTime (ms)']] *= 1000
self.df[self.columns[' DriveLength (meters)']] /= 100.
self.df[self.columns[' AverageDriveForce (lbs)']] /= lbstoN
self.df[self.columns[' PeakDriveForce (lbs)']] /= lbstoN
time = self.df[self.columns['TimeStamp (sec)']].apply(timestrtosecs2)
self.df[self.columns['TimeStamp (sec)']] = time+starttimeunix
self.df[' ElapsedTime (sec)'] = time
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']].apply(timestrtosecs2)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
self.to_standard()
# pace column
class speedcoachParser(CSVParser):
def __init__(self, *args, **kwargs):
super(speedcoachParser, self).__init__(*args, **kwargs)
self.row_date = kwargs.pop('row_date', datetime.datetime.utcnow())
self.cols = [
'Time(sec)',
'Distance(m)',
'Rate',
'HR',
'Split(sec)',
' Power (watts)',
'',
'',
'',
'',
'',
'',
'',
' lapIdx',
'Time(sec)',
' latitude',
' longitude',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
# get date from footer
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']]
pace = np.clip(pace, 0, 1e4)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
seconds = self.df[self.columns['TimeStamp (sec)']]
unixtimes = seconds + totimestamp(self.row_date)
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes[0]
self.to_standard()
class ErgStickParser(CSVParser):
def __init__(self, *args, **kwargs):
super(ErgStickParser, self).__init__(*args, **kwargs)
self.row_date = kwargs.pop('row_date', datetime.datetime.utcnow())
self.cols = [
'Total elapsed time (s)',
'Total distance (m)',
'Stroke rate (/min)',
'Current heart rate (bpm)',
'Current pace (/500m)',
'Split average power (W)',
'Drive length (m)',
'Stroke distance (m)',
'Drive time (s)',
'Drag factor',
'Stroke recovery time (s)',
'Ave. drive force (lbs)',
'Peak drive force (lbs)',
' lapIdx',
'Total elapsed time (s)',
' latitude',
' longitude',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
try:
self.df[self.columns[' DriveTime (ms)']] *= 1000.
self.df[self.columns[' StrokeRecoveryTime (ms)']] *= 1000.
except KeyError:
pass
try:
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']]
pace = np.clip(pace, 1, 1e4)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
except TypeError:
pace = self.df[self.columns[' Stroke500mPace (sec/500m)']]
pace = pace.apply(lambda x:flexistrptime(x))
pace = pace.apply(lambda x:60*x.minute+x.second+x.microsecond/1.e6)
pace = np.clip(pace, 1, 1e4)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
# check distance
try:
distance = self.df[self.columns[' Horizontal (meters)']]
except KeyError:
self.columns[' Horizontal (meters)'] = 'Total distance'
distance = self.df[self.columns[' Horizontal (meters)']]
distance = distance.apply(lambda x:int(x[:-2]))
self.df[self.columns[' Horizontal (meters)']] = distance
#velocity = 500. / pace
#power = 2.8 * velocity**3
#self.df[' Power (watts)'] = power
try:
seconds = self.df[self.columns['TimeStamp (sec)']]
        except KeyError:
self.columns['TimeStamp (sec)'] = 'Total elapsed time'
seconds = self.df[self.columns['TimeStamp (sec)']]
seconds = seconds.apply(lambda x:flexistrptime(x))
seconds = seconds.apply(lambda x:3600*x.hour+60*x.minute+x.second+x.microsecond/1.e6)
res = make_cumvalues(seconds)
seconds2 = res[0] + seconds[0]
lapidx = res[1]
unixtimes = seconds2 + totimestamp(self.row_date)
self.df[self.columns[' lapIdx']] = lapidx
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes.iloc[0]
self.to_standard()
class RowPerfectParser(CSVParser):
def __init__(self, *args, **kwargs):
super(RowPerfectParser, self).__init__(*args, **kwargs)
# get stroke curve
try:
data = self.df['curve_data'].str[1:-1].str.split(',',
expand=True)
data = data.apply(pd.to_numeric, errors = 'coerce')
            data[data < 0] = np.nan  # mask out negative force samples
s = []
for row in data.values.tolist():
s.append(str(row)[1:-1])
self.df['curve_data'] = s
        except AttributeError:
            # no parsable curve_data column; keep the original values
            pass
for c in self.df.columns:
if c != 'curve_data':
self.df[c] = pd.to_numeric(self.df[c], errors='coerce')
self.df.sort_values(by=['workout_interval_id', 'stroke_number'],
ascending=[True, True], inplace=True)
self.row_date = kwargs.pop('row_date', datetime.datetime.utcnow())
self.cols = [
'time',
'distance',
'stroke_rate',
'pulse',
'',
'power',
'stroke_length',
'distance_per_stroke',
'drive_time',
'k',
'recover_time',
'',
'peak_force',
'workout_interval_id',
'time',
' latitude',
' longitude',
'work_per_pulse'
]
self.defaultcolumnnames += [
'driveenergy'
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
self.df[self.columns[' DriveTime (ms)']] *= 1000.
self.df[self.columns[' StrokeRecoveryTime (ms)']] *= 1000.
self.df[self.columns[' PeakDriveForce (lbs)']] /= lbstoN
self.df[self.columns[' DriveLength (meters)']] /= 100.
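        # derive average drive force from work per stroke and drive length, then convert N -> lbs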
wperstroke = self.df['energy_per_stroke']
fav = wperstroke / self.df[self.columns[' DriveLength (meters)']]
fav /= lbstoN
self.df[self.columns[' AverageDriveForce (lbs)']] = fav
power = self.df[self.columns[' Power (watts)']]
v = (power / 2.8)**(1. / 3.)
pace = 500. / v
self.df[' Stroke500mPace (sec/500m)'] = pace
seconds = self.df[self.columns['TimeStamp (sec)']]
newseconds,lapidx = make_cumvalues_array(seconds)
newstrokenr,lapidx = make_cumvalues_array(self.df['stroke_number'],doequal=True)
seconds2 = pd.Series(newseconds)+newseconds[0]
res = make_cumvalues(seconds)
#seconds2 = res[0] + seconds[0]
#lapidx = res[1]
unixtime = seconds2 + totimestamp(self.row_date)
self.df[self.columns[' lapIdx']] = lapidx
self.df[self.columns['TimeStamp (sec)']] = unixtime
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtime - unixtime.iloc[0]
self.to_standard()
class MysteryParser(CSVParser):
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else:
csvfile = kwargs['csvfile']
separator = get_separator(1, csvfile)
kwargs['sep'] = separator
super(MysteryParser, self).__init__(*args, **kwargs)
self.df = self.df.drop(self.df.index[[0]])
        kwargs['engine'] = 'python'
        kwargs['sep'] = None
        self.row_date = kwargs.pop('row_date', datetime.datetime.utcnow())
self.cols = [
'Practice Elapsed Time (s)',
'Distance (m)',
'Stroke Rate (SPM)',
'HR (bpm)',
' Stroke500mPace (sec/500m)',
' Power (watts)',
' DriveLength (meters)',
' StrokeDistance (meters)',
' DriveTime (ms)',
' DragFactor',
' StrokeRecoveryTime (ms)',
' AverageDriveForce (lbs)',
' PeakDriveForce (lbs)',
' lapIdx',
' ElapsedTime (sec)',
'Lat',
'Lon',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
velo = pd.to_numeric(self.df['Speed (m/s)'], errors='coerce')
pace = 500. / velo
pace = pace.replace(np.nan, 300)
pace = pace.replace(np.inf, 300)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
seconds = self.df[self.columns['TimeStamp (sec)']]
res = make_cumvalues_array(np.array(seconds))
seconds3 = res[0]
lapidx = res[1]
# HR versions
try:
hr = self.df[self.columns[' HRCur (bpm)']]
except KeyError:
hr = self.df['HR (BPM)']
self.df[self.columns[' HRCur (bpm)']] = hr
spm = self.df[self.columns[' Cadence (stokes/min)']]
try:
strokelength = velo / (spm / 60.)
except TypeError: # pragma: no cover
strokelength = 0*velo
unixtimes = pd.Series(seconds3 + totimestamp(self.row_date))
self.df[self.columns[' lapIdx']] = lapidx
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes.iloc[0]
self.df[self.columns[' StrokeDistance (meters)']] = strokelength
self.to_standard()
class RowProParser(CSVParser):
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else:
csvfile = kwargs['csvfile']
separator = get_separator(15, csvfile)
skipfooter = skip_variable_footer(csvfile)
kwargs['skipfooter'] = skipfooter
kwargs['engine'] = 'python'
kwargs['skiprows'] = 14
kwargs['usecols'] = None
kwargs['sep'] = separator
super(RowProParser, self).__init__(*args, **kwargs)
self.footer = get_rowpro_footer(csvfile)
# crude EU format detector
try:
p = self.df['Pace'] * 500.
except TypeError: # pragma: no cover
convertlistbase = [
'Time',
'Distance',
'AvgPace',
'Pace',
'AvgWatts',
'Watts',
'SPM',
'EndHR'
]
converters = make_converter(convertlistbase,self.df)
kwargs['converters'] = converters
super(RowProParser, self).__init__(*args, **kwargs)
self.footer = get_rowpro_footer(csvfile, converters=converters)
# replace key values
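        # the RowPro footer holds per-interval summaries (Time, Distance); use them to
        # correct the last sample of each interval in the stroke data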
footerwork = self.footer[self.footer['Type'] <= 1]
maxindex = self.df.index[-1]
endvalue = self.df.loc[maxindex, 'Time']
#self.df.loc[-1, 'Time'] = 0
dt = self.df['Time'].diff()
therowindex = self.df[dt < 0].index
if len(footerwork) == 2 * (len(therowindex) + 1):
footerwork = self.footer[self.footer['Type'] == 1]
self.df.loc[-1, 'Time'] = 0
dt = self.df['Time'].diff()
therowindex = self.df[dt < 0].index
nr = 0
for i in footerwork.index:
ttime = footerwork.loc[i, 'Time']
distance = footerwork.loc[i, 'Distance']
self.df.loc[therowindex[nr], 'Time'] = ttime
self.df.loc[therowindex[nr], 'Distance'] = distance
nr += 1
if len(footerwork) == len(therowindex) + 1: # pragma: no cover
self.df.loc[-1, 'Time'] = 0
dt = self.df['Time'].diff()
therowindex = self.df[dt < 0].index
nr = 0
for i in footerwork.index:
ttime = footerwork.loc[i, 'Time']
distance = footerwork.loc[i, 'Distance']
self.df.loc[therowindex[nr], 'Time'] = ttime
self.df.loc[therowindex[nr], 'Distance'] = distance
nr += 1
else:
self.df.loc[maxindex, 'Time'] = endvalue
for i in footerwork.index:
ttime = footerwork.loc[i, 'Time']
distance = footerwork.loc[i, 'Distance']
diff = self.df['Time'].apply(lambda z: abs(ttime - z))
diff.sort_values(inplace=True)
theindex = diff.index[0]
self.df.loc[theindex, 'Time'] = ttime
self.df.loc[theindex, 'Distance'] = distance
dateline = get_file_line(11, csvfile)
dated = dateline.split(',')[0]
dated2 = dateline.split(';')[0]
try:
self.row_date = parser.parse(dated, fuzzy=True,yearfirst=True,dayfirst=False)
except ValueError: # pragma: no cover
self.row_date = parser.parse(dated2, fuzzy=True,yearfirst=True,dayfirst=False)
self.cols = [
'Time',
'Distance',
'SPM',
'HR',
'Pace',
'Watts',
'',
'',
'',
'',
'',
'',
'',
' lapIdx',
' ElapsedTime (sec)',
' latitude',
' longitude',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# calculations
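        # RowPro exports pace in seconds per metre; convert to seconds per 500 m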
self.df[self.columns[' Stroke500mPace (sec/500m)']] *= 500.0
seconds = self.df[self.columns['TimeStamp (sec)']] / 1000.
res = make_cumvalues(seconds)
seconds2 = res[0] + seconds[0]
lapidx = res[1]
seconds3 = seconds2.interpolate()
seconds3[0] = seconds[0]
unixtimes = seconds3 + arrow.get(self.row_date).timestamp()
# seconds3 = pd.to_timedelta(seconds3, unit='s')
# try:
# tts = self.row_date + seconds3
# unixtimes = tts.apply(lambda x: arrow.get(
# x).timestamp() + arrow.get(x).microsecond / 1.e6)
# except ValueError:
# seconds3 = seconds2.interpolate()
self.df[self.columns[' lapIdx']] = lapidx
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes.iloc[0]
self.to_standard()
class NKLiNKLogbookParser(CSVParser):
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else: # pragma: no cover
csvfile = kwargs['csvfile']
firmware = kwargs.get('firmware',None)
oarlength = kwargs.get('oarlength',None)
inboard = kwargs.get('inboard',None)
if firmware is not None:
try: # pragma: no cover
                firmware = float(firmware)
except ValueError: # pragma: no cover
firmware = None
super(NKLiNKLogbookParser, self).__init__(*args, **kwargs)
self.cols = [
'timestamp',
'gpsTotalDistance',
'strokeRate',
'heartRate',
'gpsPace',
'power',
'',
'gpsDistStroke',
'driveTime',
'',
'',
'handleForceAvg',
'maxHandleForce',
'sessionIntervalId',
'elapsedTime',
'latitude',
'longitude',
'gpsInstaSpeed',
'catchAngle',
'slip',
'finishAngle',
'wash',
'realWorkPerStroke',
'positionOfMaxForce',
'impellerInstaSpeed',
'impellerTotalDistance',
]
self.defaultcolumnnames += [
'GPS Speed',
'catch',
'slip',
'finish',
'wash',
'driveenergy',
'peakforceangle',
# 'cum_dist',
'ImpellerSpeed',
'ImpellerDistance',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# do something with impeller stuff
self.df['GPSSpeed'] = self.df['gpsInstaSpeed']
self.df['GPSDistance'] = self.df['gpsTotalDistance']
# force is in Newtons
try:
self.df[self.columns[' PeakDriveForce (lbs)']] /= lbstoN
self.df[self.columns[' AverageDriveForce (lbs)']] /= lbstoN
except KeyError: # pragma: no cover # no oarlock data
pass
# timestamp is in milliseconds
self.df[self.columns['TimeStamp (sec)']] /= 1000.
self.df[self.columns[' ElapsedTime (sec)']] /= 1000.
self.df[self.columns[' Stroke500mPace (sec/500m)']] /= 1000.
try:
self.df[' StrokeRecoveryTime (ms)'] = self.df['cycleTime']-self.df[self.columns[' DriveTime (ms)']]
except KeyError: # pragma: no cover
pass
corr_factor = 1.0
if firmware is not None:
if firmware < 2.18: # pragma: no cover
# apply correction
oarlength, inboard = get_empower_rigging(csvfile)
if oarlength is not None and oarlength > 3.30:
# sweep
a = 0.15
b = 0.275
corr_factor = empower_bug_correction(oarlength,inboard,a,b)
elif oarlength is not None and oarlength <= 3.3:
# scull
a = 0.06
b = 0.225
corr_factor = empower_bug_correction(oarlength,inboard,a,b)
try:
self.df[self.columns[' Power (watts)']] *= corr_factor
self.df[self.columns['driveenergy']] *= corr_factor
except KeyError: # pragma: no cover
pass
res = make_cumvalues(self.df[self.columns[' Horizontal (meters)']])
self.df['cumdist'] = res[0]
self.df['cum_dist'] = res[0]
self.to_standard()
self.df = self.df.sort_values(by='TimeStamp (sec)',ascending=True)
def impellerconsistent(self, threshold = 0.3): # pragma: no cover
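        # returns (has_impeller_data, impeller_data_consistent, fraction_of_missing_or_zero_samples)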
impellerconsistent = True
try:
impspeed = self.df['ImpellerSpeed']
except KeyError:
return False, True, 0
nrvalues = len(impspeed)
impspeed.fillna(inplace=True,value=0)
nrvalid = impspeed.astype(bool).sum()
ratio = float(nrvalues-nrvalid)/float(nrvalues)
if ratio > threshold:
impellerconsistent = False
return True, impellerconsistent, ratio
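# Hypothetical usage sketch (the file name is a placeholder, not shipped with this module):
#   p = NKLiNKLogbookParser('nk_logbook_export.csv')
#   has_imp, imp_ok, missing_ratio = p.impellerconsistent()
#   print(p.df.head())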
class SpeedCoach2Parser(CSVParser):
def __init__(self, *args, **kwargs):
if args:
csvfile = args[0]
else:
csvfile = kwargs['csvfile']
skiprows, summaryline, blanklines, sessionline = skip_variable_header(csvfile)
firmware = get_empower_firmware(csvfile)
corr_factor = 1.0
if firmware is not None:
if firmware < 2.18:
# apply correction
oarlength, inboard = get_empower_rigging(csvfile)
if oarlength is not None and oarlength > 3.30: # pragma: no cover
# sweep
a = 0.15
b = 0.275
corr_factor = empower_bug_correction(oarlength,inboard,a,b)
elif oarlength is not None and oarlength <= 3.3:
# scull
a = 0.06
b = 0.225
corr_factor = empower_bug_correction(oarlength,inboard,a,b)
unitrow = get_file_line(skiprows + 2, csvfile)
self.velo_unit = 'ms'
self.dist_unit = 'm'
if 'KPH' in unitrow:
self.velo_unit = 'kph'
if 'MPH' in unitrow: # pragma: no cover
self.velo_unit = 'mph'
if 'Kilometer' in unitrow:
self.dist_unit = 'km'
if 'Newtons' in unitrow:
self.force_unit = 'N'
else:
self.force_unit = 'lbs'
kwargs['skiprows'] = skiprows
super(SpeedCoach2Parser, self).__init__(*args, **kwargs)
self.df = self.df.drop(self.df.index[[0]])
for c in self.df.columns:
if c not in ['Elapsed Time']:
self.df[c] = pd.to_numeric(self.df[c], errors='coerce')
self.cols = [
'Elapsed Time',
'GPS Distance',
'Stroke Rate',
'Heart Rate',
'Split (GPS)',
'Power',
'',
'',
'',
'',
'',
'Force Avg',
'Force Max',
'Interval',
' ElapsedTime (sec)',
'GPS Lat.',
'GPS Lon.',
'GPS Speed',
'Catch',
'Slip',
'Finish',
'Wash',
'Work',
'Max Force Angle',
'cum_dist',
]
self.defaultcolumnnames += [
'GPS Speed',
'catch',
'slip',
'finish',
'wash',
'driveenergy',
'peakforceangle',
'cum_dist',
]
self.cols = [b if a == '' else a
for a, b in zip(self.cols, self.defaultcolumnnames)]
self.columns = dict(list(zip(self.defaultcolumnnames, self.cols)))
# correct Power, Work per Stroke
try:
self.df[self.columns[' Power (watts)']] *= corr_factor
self.df[self.columns['driveenergy']] *= corr_factor
except KeyError:
pass
# set GPS speed apart for swapping
try:
self.df['GPSSpeed'] = self.df['GPS Speed']
self.df['GPSDistance'] = self.df['GPS Distance']
except KeyError:
try:
self.df['GPSSpeed'] = self.df['Speed (GPS)']
self.df['GPSDistance'] = self.df['Distance (GPS)']
self.columns['GPS Speed'] = 'Speed (GPS)'
self.columns[' Horizontal (meters)'] = 'Distance (GPS)'
except KeyError:
pass
if self.velo_unit != 'ms':
if self.velo_unit == 'kph':
self.df['GPSSpeed'] = self.df['GPSSpeed'] / 3.6
self.df['Speed (IMP)'] = self.df['Speed (IMP)'] / 3.6
if self.velo_unit == 'mph':
self.df['GPSSpeed'] = self.df['GPSSpeed'] * 0.44704
self.df['Speed (IMP)'] = self.df['Speed (IMP)'] * 0.44704
# take Impeller split / speed if available and not zero
try:
impspeed = self.df['Speed (IMP)']
self.columns['GPS Speed'] = 'Speed (IMP)'
self.columns[' Horizontal (meters)'] = 'Distance (IMP)'
self.df['ImpellerSpeed'] = impspeed
self.df['ImpellerDistance'] = self.df['Distance (IMP)']
except KeyError:
try:
impspeed = self.df['Imp Speed']
self.columns['GPS Speed'] = 'Imp Speed'
self.columns[' Horizontal (meters)'] = 'Imp Distance'
self.df['ImpellerSpeed'] = impspeed
self.df['ImpellerDistance'] = self.df['Imp Distance']
except KeyError:
impspeed = 0*self.df[self.columns['GPS Speed']]
if impspeed.std() != 0 and impspeed.mean() != 0:
self.df[self.columns['GPS Speed']] = impspeed
else:
self.columns['GPS Speed'] = 'GPS Speed'
self.columns[' Horizontal (meters)'] = 'GPS Distance'
#
try:
dist2 = self.df[self.columns[' Horizontal (meters)']]
except KeyError:
try:
dist2 = self.df['Distance (GPS)']
self.columns[' Horizontal (meters)'] = 'Distance (GPS)'
if 'GPS' in self.columns['GPS Speed']:
self.columns['GPS Speed'] = 'Speed (GPS)'
except KeyError: # pragma: no cover
try:
dist2 = self.df['Imp Distance']
self.columns[' Horizontal (meters)'] = 'Distance (GPS)'
self.columns[' Stroke500mPace (sec/500m)'] = 'Imp Split'
self.columns[' Power (watts)'] = 'Work'
self.columns['Work'] = 'Power'
self.columns['GPS Speed'] = 'Imp Speed'
except KeyError:
dist2 = self.df['Distance (IMP)']
self.columns[' Stroke500mPace (sec/500m)'] = 'Split (IMP)'
self.columns[' Horizontal (meters)'] = 'Distance (GPS)'
self.columns[' Power (watts)'] = 'Work'
self.columns['Work'] = 'Power'
self.columns['GPS Speed'] = 'Speed (IMP)'
try:
if self.force_unit == 'N':
self.df[self.columns[' PeakDriveForce (lbs)']] /= lbstoN
self.df[self.columns[' AverageDriveForce (lbs)']] /= lbstoN
except KeyError: # pragma: no cover
pass
if self.dist_unit == 'km':
#dist2 *= 1000
self.df[self.columns[' Horizontal (meters)']] *= 1000.
try:
self.df['GPSDistance'] *= 1000.
except KeyError: # pragma: no cover
pass
try:
self.df['ImpellerDistance'] *= 1000.
except KeyError: # pragma: no cover
pass
cum_dist = make_cumvalues_array(dist2.fillna(method='ffill').values)[0]
self.df[self.columns['cum_dist']] = cum_dist
velo = self.df[self.columns['GPS Speed']]
if self.velo_unit == 'kph':
velo = velo / 3.6
if self.velo_unit == 'mph': # pragma: no cover
velo = velo * 0.44704
pace = 500. / velo
pace = pace.replace(np.nan, 300)
self.df[self.columns[' Stroke500mPace (sec/500m)']] = pace
# get date from header
try:
dateline = get_file_line(4, csvfile)
dated = dateline.split(',')[1]
# self.row_date = parser.parse(dated, fuzzy=True, dayfirst=False)
self.row_date = parser.parse(dated,fuzzy=False,dayfirst=False)
alt_date = parser.parse(dated,fuzzy=False,dayfirst=True)
except ValueError:
dateline = get_file_line(3, csvfile)
dated = dateline.split(',')[1]
try:
# self.row_date = parser.parse(dated, fuzzy=True,dayfirst=False)
self.row_date = parser.parse(dated, fuzzy=False,dayfirst=False)
alt_date = parser.parse(dated,fuzzy=False,dayfirst=True)
except ValueError:
self.row_date = datetime.datetime.now()
alt_date = self.row_date
if alt_date.month == datetime.datetime.now().month:
if alt_date != self.row_date:
self.row_date = alt_date
if self.row_date.tzinfo is None or self.row_date.tzinfo.utcoffset(self.row_date) is None:
try:
latavg = self.df[self.columns[' latitude']].mean()
lonavg = self.df[self.columns[' longitude']].mean()
tf = TimezoneFinder()
timezone_str = tf.timezone_at(lng=lonavg, lat=latavg)
if timezone_str is None: # pragma: no cover
timezone_str = tf.closest_timezone_at(lng=lonavg,
lat=latavg)
row_date = self.row_date
row_date = pytz.timezone(timezone_str).localize(row_date)
except KeyError:
row_date = pytz.timezone('UTC').localize(self.row_date)
self.row_date = row_date
timestrings = self.df[self.columns['TimeStamp (sec)']]
seconds = timestrings.apply(
lambda x: timestrtosecs2(x, unknown=np.nan)
)
seconds = clean_nan(np.array(seconds))
seconds = pd.Series(seconds).fillna(method='ffill').values
res = make_cumvalues_array(np.array(seconds))
seconds3 = res[0]
lapidx = res[1]
unixtimes = seconds3 + totimestamp(self.row_date)
if not self.df.empty:
self.df[self.columns[' lapIdx']] = lapidx
self.df[self.columns['TimeStamp (sec)']] = unixtimes
self.columns[' ElapsedTime (sec)'] = ' ElapsedTime (sec)'
self.df[self.columns[' ElapsedTime (sec)']] = unixtimes - unixtimes[0]
self.to_standard()
# Read summary data
skipfooter = 7 + len(self.df)
if not blanklines:
skipfooter = skipfooter - 3
if summaryline:
try:
self.summarydata = pd.read_csv(csvfile,
skiprows=summaryline,
skipfooter=skipfooter,
engine='python')
self.summarydata.drop(0, inplace=True)
            except Exception:  # pragma: no cover
self.summarydata = | pd.DataFrame() | pandas.DataFrame |
import pytest
import pandas as pd
import goldenowl.asset.asset as at
def get_prdata():
date_range =[elem for elem in | pd.date_range(start="1990-01-01",end="2000-01-01", freq='1D') | pandas.date_range |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import sys
sys.path.append('/home/will/PatientPicker/')
# <codecell>
import LoadingTools
# <codecell>
redcap_data = LoadingTools.load_redcap_data().groupby(['Patient ID', 'VisitNum']).first()
# <codecell>
cols = ['Date Of Visit']+[col for col in redcap_data.columns if col.startswith('Test-')]
cols
# <codecell>
import pandas as pd
pairs = [['A0138', 'A0360'],
['A0017', 'A0054'],
['A0235', 'A0266'],
['A0023', 'A0114'],
['A0059', 'A0403'],
]
res = []
for gp in pairs:
tdata = redcap_data[cols].ix[gp].reset_index()
tdata['TruePat'] = gp[0]
res.append(tdata.copy())
nres = | pd.concat(res, axis=0, ignore_index=True) | pandas.concat |
import sys
import os
import numpy as np
import subprocess as sp
import multiprocessing as mp
import pandas as pd
from pyhdf.SD import SD, SDC
from itertools import repeat
import pickle
import datetime
from time import time
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..",
"Tools"
)
)
from collocation import brute_force_parallel, clean_up_df, save_df, read_list
from caliop_tools import number_to_bit, custom_feature_conversion, calipso_to_datetime
def build_args(himawari_folder_names, himawari_base_dir,
caliop_filename, caliop_base_dir):
"""
Takes the list of Himawari-8 scenes and collocates them with the given CALIOP file.
:param himawari_folder_names: list type. List of folder names for the Himawari-8
scenes to be collocated.
:param himawari_base_dir: str type. The full path to the directory in which the
Himawari-8 data is held.
:param caliop_filename: str type. Name of the CALIOP file to be collocated with the given
Himawari-8 scenes.
:param caliop_base_dir: str type. The full path to the directory in which the
CALIOP file is held.
:return: list type. A list of lists, with each entry corresponding to a Himawari-8
scene and CALIOP overpass to be collocated.
"""
args = [] # Create empty list of args to be filled
for himawari_name in himawari_folder_names: # For each folder containing a Himawari-8 scene
arg = (os.path.join(himawari_base_dir, himawari_name), # Himawari-8 folder from list
himawari_name, # Name of the Himawari-8 folder to be collocated
os.path.join(caliop_base_dir, caliop_filename), # CALIOP file to be collocated
caliop_filename[-25:-4]) # CALIOP overpass "name", I.E. the date-time marker given in the filename.
args.append(arg) # Add sublist of args for collocation to args list
return args
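# Hypothetical usage sketch (directories and file names are placeholders):
#   args = build_args(['20200101_0300', '20200101_0310'],
#                     '/data/himawari8',
#                     'CAL_LID_L2_05kmMLay-Standard-V4-20.2020-01-01T03-10-05ZD.hdf',
#                     '/data/caliop')
#   dfs = parallel_collocation(args)
#   collocated = post_process(dfs)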
def parallel_collocation(brute_force_args):
"""
Will take the input args and collocate each set of files in parallel.
:param brute_force_args: list type. Output of build_args function for collocation args.
:return: list of collocated dataframes
"""
manager = mp.Manager() # Start Manager to track parallel processes
return_dict = manager.dict() # Create dictionary for temporarily storing collocated data
processes = [] # Create empty list of processes to be filled
for num, arg_set in enumerate(brute_force_args): # Set each collocation task running
proc_num = num + 1
print('Starting Process %s' % proc_num)
p = mp.Process(target = brute_force_parallel, # Create a collocation process
args = tuple(list(arg_set) + [return_dict, proc_num]))
processes.append(p) # Add task to processes list
p.start() # Start the collocation process
for process in processes: # For each process started
process.join() # Connect all processes so that the next line in this
                       # function will not run until all processes have been completed
print('Processes completed')
print('Returned process keys: ', return_dict.keys())
    # sort the process keys numerically so dataframes are appended in process order
    process_nums = sorted(return_dict.keys(), key=int)
df_list = [] # Create a final list for storing the collocated data
for num in process_nums: # For each process's collocated dataframe
df_list.append(return_dict[str(num)]) # Add dataframe to df_list; ensures ordering is correct
return df_list # Return the ordered list of collocated dataframes
def post_process(list_of_df):
"""
Takes a list of collocated dataframes and processes them into a single dataframe.
:param list_of_df: list type. List of dataframes to be concatenated and processed.
:return: pandas dataframe of collocated data.
"""
list_of_df = tuple(list_of_df) #Ensure list is converted to tuple type
df = | pd.concat(list_of_df, ignore_index=True) | pandas.concat |
'''
@lptMusketeers 2017.10.20
'''
import pandas as pd
import datetime
from functools import reduce
import codecs
import csv
from decimal import *
import numpy as np
class FeatureEngineering(object):
def nondrop_precent(self,source_path,target_path):
print("nondrop_precent...")
df1 = pd.read_csv(source_path)
df1["nondrop_precent"]=df1["nondropout_num"]/df1["course_num"]
df1.to_csv(target_path,index=False)
def add(self,x,y):
return x+y
def op_character(self,source_path,target_path):
print("op_character...")
df1 = pd.read_csv(source_path)
gpby_enrol = df1.groupby("enrollment_id")
enrol_list = list()
interval_list = list()
last_minutes = list()
valid_opnum = list()
all_opnum = list()
for enrollment_id,group in gpby_enrol:
group.groupby("interval")
for interval,group2 in group.groupby('interval'):
enrol_list.append(enrollment_id)
interval_list.append(interval)
timelist = group2.time.tolist()
h1 = datetime.datetime.strptime(timelist[0],'%H:%M:%S')
h2 = datetime.datetime.strptime(timelist[len(timelist)-1],'%H:%M:%S')
hh = h2-h1
last_minutes.append(hh.seconds/60+1)
valid_len = [0,0,0,0]
valid_len[0] = len(group2[group2.event=='problem'])
valid_len[1] = len(group2[group2.event=='video'])
valid_len[2] = len(group2[group2.event == 'wiki'])
valid_len[3] = len(group2[group2.event == 'discussion'])
valid_opnum.append(reduce(self.add,valid_len))
all_opnum.append(len(group2))
df2 = pd.DataFrame({"enrollment_id":enrol_list,"interval":interval_list,"last_minutes":last_minutes,"valid_opnum":valid_opnum,"all_opnum":all_opnum})
df2 = df2[["enrollment_id","interval","last_minutes","valid_opnum","all_opnum"]]
df2.to_csv(target_path,index=False)
def op_of_day(self,source_path,target_path):
print("op_of_day...")
log_file = codecs.open(source_path,'r','utf-8')
log_final_file = codecs.open(target_path,'w+','utf-8')
framedata1 = pd.read_csv(log_file)
writer = csv.writer(log_final_file,delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writedata = list()
for i in range(0,111):
writedata.append('')
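        # 111 columns in total: enrollment_id, 3 blocks of 30 daily features, and 20 summary statistics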
writedata[0]="enrollment_id"
index =1
for i in range(1,31):
writedata[i]="all_opnum_"+str(i)
writedata[i+30]="valid_opnum_"+str(i)
writedata[i+60]="last_minutes_"+str(i)
index += 3
name_array1 = ["pre","mid","last","thirty_day"]
name_array2 = ["min","max","sum","mean","std"]
for name1 in name_array1:
for name2 in name_array2:
writedata[index]=name1+"_"+name2
index += 1
writer.writerow(writedata)
for enrollment_id,group in framedata1.groupby('enrollment_id'):
writedata[0]=enrollment_id
interval_list = group.interval.tolist()
last_minutes_list = group.last_minutes.tolist()
valid_num_list = group.valid_opnum.tolist()
all_num_list = group.all_opnum.tolist()
tag = 0
for i in range(1,31):
                if i in interval_list:  # the user was active on day i
                    writedata[i] = all_num_list[tag]  # first block of 30 features: total operation count
                    writedata[i+30] = valid_num_list[tag]  # second block of 30 features: valid operation count
                    writedata[i+60] = last_minutes_list[tag]  # third block of 30 features: session duration (minutes)
                    tag = tag + 1
                else:  # the user had no activity on day i
writedata[i] = 0
writedata[i+30] = 0
writedata[i+60] = 0
tag = 0
            '''
            Aggregate total operation counts over the early, middle and late ten-day periods.
            '''
preall = list()
midall = list()
lastall = list()
for i in range(1, 31):
if i in interval_list:
if i > 0 and i <= 10:
preall.append(all_num_list[tag])
if i > 10 and i <= 20:
midall.append(all_num_list[tag])
if i > 20 and i <= 30:
lastall.append(all_num_list[tag])
tag = tag + 1
else:
if i > 0 and i <= 10:
preall.append(0)
if i > 10 and i <= 20:
midall.append(0)
if i > 20 and i <= 30:
lastall.append(0)
            # statistics for the first ten days
            writedata[91] = min(preall)  # minimum daily operation count over the first ten days
            writedata[92] = max(preall)  # maximum daily operation count over the first ten days
            writedata[93] = np.array(preall).sum()  # total operations over the first ten days
            writedata[94] = int(np.array(preall).mean())  # mean daily operations over the first ten days
            writedata[95] = Decimal(np.array(preall).std()).quantize(Decimal('0.00'))  # standard deviation of daily operations
            # statistics for the middle ten days
writedata[96] = min(midall)
writedata[97] = max(midall)
writedata[98] = np.array(midall).sum()
writedata[99] = int(np.array(midall).mean())
writedata[100]= Decimal(np.array(midall).std()).quantize(Decimal('0.00'))
            # statistics for the last ten days
writedata[101] = min(lastall)
writedata[102] = max(lastall)
writedata[103] = np.array(lastall).sum()
writedata[104] = int(np.array(lastall).mean())
writedata[105] = Decimal(np.array(lastall).std()).quantize(Decimal('0.00'))
            # statistics over all thirty days
tag = 0
writedata[106] = min(all_num_list)
writedata[107] = max(all_num_list)
templist = all_num_list
writedata[108] = np.array(templist).sum()
for i in range(0,30-len(all_num_list)):
templist.append(0)
writedata[109] = int(np.array(templist).mean())
writedata[110] = Decimal(np.array(templist).std()).quantize(Decimal('0.00'))
            #print('processing enrollment', enrollment_id)
            writer.writerow(writedata)  # write the feature row to the output file
def feature_all(self,source_path1,source_path2,target_path):
print("feature_all...")
df1 = pd.read_csv(source_path1)
df2 = pd.read_csv(source_path2)
df3 = | pd.merge(df1,df2,on="enrollment_id",how="left") | pandas.merge |
import os
import time
import numpy as np
import pandas as pd
import scipy.sparse as ssp
import scipy.stats as stats
import statsmodels.sandbox.stats.multicomp
from ete3 import Tree
from matplotlib import pyplot as plt
from numpy.lib.twodim_base import tril_indices
from scipy.cluster import hierarchy
# from plotnine import *
from sklearn.manifold import SpectralEmbedding
from cospar import tool as tl
from cospar.plotting import _utils as pl_util
from .. import help_functions as hf
from .. import logging as logg
from .. import settings
def gene_expression_dynamics(
adata,
selected_fate,
gene_name_list,
traj_threshold=0.1,
source="transition_map",
invert_PseudoTime=False,
mask=None,
compute_new=True,
gene_exp_percentile=99,
n_neighbors=8,
plot_raw_data=False,
stat_smooth_method="loess",
):
"""
Plot gene trend along the inferred dynamic trajectory.
    We assume that the dynamic trajectory at the given specification is already
available at adata.obs[f'traj_{fate_name}'], which can be created via
:func:`.iterative_differentiation` or
:func:`.progenitor`.
Using the states that belong to the trajectory, it computes the pseudo time
for these states and shows expression dynamics of selected genes along
this pseudo time.
Specifically, we first construct KNN graph, compute spectral embedding,
and take the first component as the pseudo time. To create dynamics for a
selected gene, we re-weight the expression of this gene at each cell by its
probability belonging to the trajectory, and rescale the expression at selected
percentile value. Finally, we fit a curve to the data points.
Parameters
----------
adata: :class:`~anndata.AnnData` object
Assume to contain transition maps at adata.uns.
selected_fate: `str`, or `list`
targeted cluster of the trajectory, as consistent with adata.obs['state_info']
When it is a list, the listed clusters are combined into a single fate cluster.
gene_name_list: `list`
List of genes to plot on the dynamic trajectory.
traj_threshold: `float`, optional (default: 0.1), range: (0,1)
        Relative threshold used to threshold the inferred dynamic trajectory when selecting states.
invert_PseudoTime: `bool`, optional (default: False)
        If true, invert the pseudotime: 1-pseudotime. This is useful when the direction
of pseudo time does not agree with intuition.
mask: `np.array`, optional (default: None)
A boolean array for further selecting cell states.
compute_new: `bool`, optional (default: True)
        If true, compute everything from scratch (as we save the computed pseudotime)
gene_exp_percentile: `int`, optional (default: 99)
Plot gene expression below this percentile.
n_neighbors: `int`, optional (default: 8)
Number of nearest neighbors for constructing KNN graph.
plot_raw_data: `bool`, optional (default: False)
Plot the raw gene expression values of each cell along the pseudotime.
stat_smooth_method: `str`, optional (default: 'loess')
Smooth method used in the ggplot. Current available choices are:
'auto' (Use loess if (n<1000), glm otherwise),
'lm' (Linear Model),
'wls' (Linear Model),
'rlm' (Robust Linear Model),
'glm' (Generalized linear Model),
'gls' (Generalized Least Squares),
'lowess' (Locally Weighted Regression (simple)),
'loess' (Locally Weighted Regression),
'mavg' (Moving Average),
'gpr' (Gaussian Process Regressor)}.
"""
    if mask is None:
final_mask = np.ones(adata.shape[0]).astype(bool)
else:
if mask.shape[0] == adata.shape[0]:
final_mask = mask
else:
logg.error(
"mask must be a boolean array with the same size as adata.shape[0]."
)
return None
hf.check_available_map(adata)
fig_width = settings.fig_width
fig_height = settings.fig_height
point_size = settings.fig_point_size
if len(adata.uns["available_map"]) == 0:
logg.error(f"There is no transition map available yet")
else:
if type(selected_fate) == str:
selected_fate = [selected_fate]
(
mega_cluster_list,
valid_fate_list,
fate_array_flat,
sel_index_list,
) = hf.analyze_selected_fates(adata.obs["state_info"], selected_fate)
if len(mega_cluster_list) == 0:
logg.error("No cells selected. Computation aborted!")
return adata
else:
fate_name = mega_cluster_list[0]
target_idx = sel_index_list[0]
x_emb = adata.obsm["X_emb"][:, 0]
y_emb = adata.obsm["X_emb"][:, 1]
data_des = adata.uns["data_des"][-1]
data_path = settings.data_path
figure_path = settings.figure_path
file_name = os.path.join(
data_path, f"{data_des}_fate_trajectory_pseudoTime_{fate_name}.npy"
)
traj_name = f"diff_trajectory_{source}_{fate_name}"
if traj_name not in adata.obs.keys():
logg.error(
f"The target fate trajectory for {fate_name} with {source} have not been inferred yet.\n"
"Please infer the trajectory with first with cs.pl.progenitor, \n"
"or cs.pl.iterative_differentiation."
)
else:
prob_0 = np.array(adata.obs[traj_name])
sel_cell_idx = (prob_0 > traj_threshold * np.max(prob_0)) & final_mask
if np.sum(sel_cell_idx) == 0:
raise ValueError("No cells selected!")
sel_cell_id = np.nonzero(sel_cell_idx)[0]
if os.path.exists(file_name) and (not compute_new):
logg.info("Load pre-computed pseudotime")
PseudoTime = np.load(file_name)
else:
from sklearn import manifold
data_matrix = adata.obsm["X_pca"][sel_cell_idx]
method = SpectralEmbedding(n_components=1, n_neighbors=n_neighbors)
PseudoTime = method.fit_transform(data_matrix)
np.save(file_name, PseudoTime)
# logg.info("Run time:",time.time()-t)
PseudoTime = PseudoTime - np.min(PseudoTime)
PseudoTime = (PseudoTime / np.max(PseudoTime)).flatten()
## re-order the pseudoTime such that the target fate has the pseudo time 1.
if invert_PseudoTime:
# target_fate_id=np.nonzero(target_idx)[0]
# convert_fate_id=hf.converting_id_from_fullSpace_to_subSpace(target_fate_id,sel_cell_id)[0]
# if np.mean(PseudoTime[convert_fate_id])<0.5: PseudoTime=1-PseudoTime
PseudoTime = 1 - PseudoTime
# pdb.set_trace()
if (
np.sum((PseudoTime > 0.25) & (PseudoTime < 0.75)) == 0
): # the cell states do not form a contiuum. Plot raw data instead
logg.error(
"The selected cell states do not form a connected graph. Cannot form a continuum of pseudoTime. Only plot the raw data"
)
plot_raw_data = True
## plot the pseudotime ordering
fig = plt.figure(figsize=(fig_width * 2, fig_height))
ax = plt.subplot(1, 2, 1)
pl_util.customized_embedding(
x_emb,
y_emb,
sel_cell_idx,
ax=ax,
title="Selected cells",
point_size=point_size,
)
ax1 = plt.subplot(1, 2, 2)
pl_util.customized_embedding(
x_emb[sel_cell_idx],
y_emb[sel_cell_idx],
PseudoTime,
ax=ax1,
title="Pseudo Time",
point_size=point_size,
)
# customized_embedding(x_emb[final_id],y_emb[final_id],PseudoTime,ax=ax1,title='Pseudo time')
Clb = fig.colorbar(
plt.cm.ScalarMappable(cmap=plt.cm.Reds), ax=ax1, label="Pseudo time"
)
fig.savefig(
os.path.join(
figure_path,
f"{data_des}_fate_trajectory_pseudoTime_{fate_name}.{settings.file_format_figs}",
)
)
temp_dict = {"PseudoTime": PseudoTime}
for gene_name in gene_name_list:
yy_max = np.percentile(
adata.obs_vector(gene_name), gene_exp_percentile
) # global blackground
yy = np.array(adata.obs_vector(gene_name)[sel_cell_idx])
rescaled_yy = (
yy * prob_0[sel_cell_idx] / yy_max
) # rescaled by global background
temp_dict[gene_name] = rescaled_yy
from plotnine import (
aes,
geom_point,
ggplot,
labs,
stat_smooth,
theme_classic,
)
data2 = | pd.DataFrame(temp_dict) | pandas.DataFrame |
from typing import List
from typing import Union
import pandas as pd
import pystac
import shapely
from google.cloud import bigquery
from google.oauth2 import service_account
from pystac.extensions.eo import AssetEOExtension
from pystac.extensions.eo import EOExtension
from pystac.extensions.projection import ProjectionExtension
from satextractor.models.constellation_info import BAND_INFO
from satextractor.models.constellation_info import LANDSAT_PROPERTIES
from satextractor.models.constellation_info import MEDIA_TYPES
from satextractor.utils import get_utm_epsg
def gcp_region_to_item_collection(
credentials: str,
region: Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon],
start_date: str,
end_date: str,
constellations: List[str],
) -> pystac.ItemCollection:
"""Create stac ItemCollection for a given Sentinel 2
Google Storage Region between dates.
Args:
credentials (str): The bigquery client credentials json path
region (Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]): the region
start_date (str): sensing start date
end_date (str): sensing end date
Returns:
pystac.ItemCollection: a item collection for the given region and dates
"""
credentials = service_account.Credentials.from_service_account_file(credentials)
# Construct a BigQuery client object.
client = bigquery.Client(credentials=credentials)
dfs = []
for constellation in constellations:
if constellation == "sentinel-2":
df = get_sentinel_2_assets_df(client, region, start_date, end_date)
else:
df = get_landsat_assets_df(
client,
region,
start_date,
end_date,
constellation,
)
df["constellation"] = constellation
dfs.append(df)
df = pd.concat(dfs)
return create_stac_item_collection_from_df(df)
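# Hypothetical usage sketch (credentials path, dates and region are placeholders):
#   region = shapely.geometry.box(-0.5, 51.3, 0.3, 51.7)
#   items = gcp_region_to_item_collection(
#       credentials='service-account.json',
#       region=region,
#       start_date='2020-01-01',
#       end_date='2020-02-01',
#       constellations=['sentinel-2', 'landsat-8'],
#   )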
def get_landsat_assets_df(
client: bigquery.Client,
shp: Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon],
start_date: str,
end_date: str,
constellation: str,
) -> pd.DataFrame:
"""Perform a bigquery to obtain landsat assets as a dataframe.
Args:
client (bigquery.Client): The bigquery client with correct auth
        shp (Union[shapely.geometry.Polygon, shapely.geometry.MultiPolygon]): the region to query
        start_date (str): sensing start date
        end_date (str): sensing end date
        constellation (str): which constellation to retrieve, in ['landsat-5','landsat-7','landsat-8']
Returns:
[type]: a dataframe with the query results
"""
if shp.type == "Polygon":
shp = [shp]
dfs = []
for subshp in shp:
(
region_west_lon,
region_south_lat,
region_east_lon,
region_north_lat,
) = subshp.bounds # this won't work for multipolygons. need to manage this.
query = f"""
SELECT * FROM
`bigquery-public-data.cloud_storage_geo_index.landsat_index`
WHERE DATE(sensing_time) >= "{start_date}" and DATE(sensing_time) <= "{end_date}"
AND spacecraft_id = "{constellation.upper().replace('-','_')}"
AND data_type = "{LANDSAT_PROPERTIES[constellation]['DATA_TYPE']}"
AND sensor_id = "{LANDSAT_PROPERTIES[constellation]['SENSOR_ID']}"
AND west_lon <= {region_east_lon}
AND east_lon >= {region_west_lon}
AND north_lat >= {region_south_lat}
AND south_lat <= {region_north_lat}
"""
query_job = client.query(query) # Make an API request.
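# Note: the four lon/lat comparisons in the SQL above implement a standard
# bounding-box overlap test between each scene footprint and the query region's bounds.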
dfs.append(query_job.to_dataframe())
df = | pd.concat(dfs) | pandas.concat |
import sys, os
import django
import csv
import calendar
import datetime
import re
import argparse
import openpyxl
import numpy as np  # needed for np.nan in convert_cell
import pandas as pd
from typing import TYPE_CHECKING, Dict, List, Optional
from pandas._typing import FilePathOrBuffer, Scalar
from django.core.mail import send_mail
from django.db import transaction
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "carbure.settings")
django.setup()
from core.models import GenericCertificate
today = datetime.date.today()
CSV_FOLDER = '/tmp/redcert/'
def get_sheet_data(sheet, convert_float: bool) -> List[List[Scalar]]:
data: List[List[Scalar]] = []
for row in sheet.rows:
data.append([convert_cell(cell, convert_float) for cell in row])
return data
def convert_cell(cell, convert_float: bool) -> Scalar:
from openpyxl.cell.cell import TYPE_BOOL, TYPE_ERROR, TYPE_NUMERIC
if cell.is_date:
return cell.value
elif cell.data_type == TYPE_ERROR:
return np.nan
elif cell.data_type == TYPE_BOOL:
return bool(cell.value)
elif cell.value is None:
return "" # compat with xlrd
elif cell.data_type == TYPE_NUMERIC:
# GH5394
if convert_float:
val = int(cell.value)
if val == cell.value:
return val
else:
return float(cell.value)
return cell.value
def load_certificates():
new = []
invalidated = []
existing = {c.certificate_id: c for c in GenericCertificate.objects.filter(certificate_type=GenericCertificate.REDCERT)}
filename = '%s/REDcert-certificates.xlsx' % (CSV_FOLDER)
wb = openpyxl.load_workbook(filename, data_only=True)
sheet = wb.worksheets[0]
data = get_sheet_data(sheet, convert_float=True)
column_names = data[0]
data = data[1:]
df = | pd.DataFrame(data, columns=column_names) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generates the data needed for Supplementary Figure 3.
The figure is generated by the routine fig_2d_age_bdi.py
"""
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from scipy.stats import median_test
ref = datetime.date(2019, 12, 31)
max_dur = 90
data0 = pd.read_csv('../Data/SRAG_filtered_morb.csv')
data0 = data0[(~pd.isna(data0.NU_IDADE_N))&(~pd.isna(data0.ibp))]
saida_H = {'mean_all':[], 'stdm_all':[], 'median_all':[], \
'mean_death':[], 'stdm_death':[], 'median_death':[], 'mean_cure':[],\
'stdm_cure':[], 'median_cure':[], 'n_death':[], 'n_cure':[],\
'age_min':[], 'age_mean':[], 'age_max':[], \
'ibp_min':[], 'ibp_mean':[], 'ibp_max':[]}
saida_U = {'mean_all':[], 'stdm_all':[], 'median_all':[], \
'mean_death':[], 'stdm_death':[], 'median_death':[], 'mean_cure':[],\
'stdm_cure':[], 'median_cure':[], 'n_death':[], 'n_cure':[],\
'age_min':[], 'age_mean':[], 'age_max':[],
'ibp_min':[], 'ibp_mean':[], 'ibp_max':[]}
data0 = data0[~(data0.UTI_dur<0)]
data0 = data0[~(data0.HOSP_dur<0)]
data0 = data0[~((data0.UTI_dur>max_dur)|(data0.HOSP_dur>max_dur))]
ages = [0, 18, 30, 40, 50, 65, 75, 85, np.inf]
nsep = len(ages) - 1
nsep2 = 10
ibps = np.linspace(data0.ibp.min(), data0.ibp.max(), nsep2+1)
#%%
for j in range(nsep2):
for i in range(nsep):
print(i,j)
if i == nsep-1:
data = data0[(data0.NU_IDADE_N>=ages[i])]
else:
data = data0[(data0.NU_IDADE_N>=ages[i])&(data0.NU_IDADE_N<ages[i+1])]
if j == nsep2-1:
data = data[data.ibp>=ibps[j]]
else:
data = data[(data.ibp>=ibps[j])&(data.ibp<ibps[j+1])]
agem = data.NU_IDADE_N.mean()
agei = [data.NU_IDADE_N.min(), data.NU_IDADE_N.max()]
saida_H['age_mean'].append(agem)
saida_U['age_mean'].append(agem)
saida_H['age_max'].append(agei[1])
saida_U['age_max'].append(agei[1])
saida_H['age_min'].append(agei[0])
saida_U['age_min'].append(agei[0])
ibpm = data.ibp.mean()
ibpi = [data.ibp.min(), data.ibp.max()]
saida_H['ibp_mean'].append(ibpm)
saida_U['ibp_mean'].append(ibpm)
saida_H['ibp_max'].append(ibpi[1])
saida_U['ibp_max'].append(ibpi[1])
saida_H['ibp_min'].append(ibpi[0])
saida_U['ibp_min'].append(ibpi[0])
hU, b_edg = np.histogram(data.UTI_dur, bins=np.arange(0, max_dur+1))
hH, b_edg = np.histogram(data.HOSP_dur[pd.isna(data.UTI_dur)], \
bins=np.arange(0, max_dur+1))
dICU = np.sum((~pd.isna(data.DT_ENTUTI))&(data.EVOLUCAO==2))
cICU = np.sum((~pd.isna(data.DT_ENTUTI))&(data.EVOLUCAO==1))
dH = np.sum((pd.isna(data.DT_ENTUTI))&(data.EVOLUCAO==2))
cH = np.sum((pd.isna(data.DT_ENTUTI))&(data.EVOLUCAO==1))
saida_H['n_death'].append(dH)
saida_U['n_death'].append(dICU)
saida_H['n_cure'].append(cH)
saida_U['n_cure'].append(cICU)
U = data.UTI_dur[~pd.isna(data.UTI_dur)]
tU = U.mean()
stU = U.std(ddof=1) / np.sqrt(len(U))
saida_U['mean_all'].append(tU)
saida_U['stdm_all'].append(stU)
saida_U['median_all'].append(U.median())
U_d = data.UTI_dur[(~pd.isna(data.UTI_dur))&(data.EVOLUCAO==2)]
tU_d = U_d.mean()
stU_d = U_d.std(ddof=1) / np.sqrt(len(U_d))
saida_U['mean_death'].append(tU_d)
saida_U['stdm_death'].append(stU_d)
saida_U['median_death'].append(U_d.median())
U_c = data.UTI_dur[(~pd.isna(data.UTI_dur))&(data.EVOLUCAO==1)]
tU_c = U_c.mean()
stU_c = U_c.std(ddof=1) / np.sqrt(len(U_c))
saida_U['mean_cure'].append(tU_c)
saida_U['stdm_cure'].append(stU_c)
saida_U['median_cure'].append(U_c.median())
H = data.HOSP_dur[pd.isna(data.UTI_dur)]
tH = H.mean()
stH = H.std(ddof=1) / np.sqrt(len(H))
saida_H['mean_all'].append(tH)
saida_H['stdm_all'].append(stH)
saida_H['median_all'].append(H.median())
H_d = data.HOSP_dur[(pd.isna(data.UTI_dur))&(data.EVOLUCAO==2)]
tH_d = H_d.mean()
stH_d = H_d.std(ddof=1) / np.sqrt(len(H_d))
saida_H['mean_death'].append(tH_d)
saida_H['stdm_death'].append(stH_d)
saida_H['median_death'].append(H_d.median())
H_c = data.HOSP_dur[(pd.isna(data.UTI_dur))&(data.EVOLUCAO==1)]
tH_c = H_c.mean()
stH_c = H_c.std(ddof=1) / np.sqrt(len(H_c))
saida_H['mean_cure'].append(tH_c)
saida_H['stdm_cure'].append(stH_c)
saida_H['median_cure'].append(H_c.median())
sah = | pd.DataFrame(saida_H) | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_parsing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
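# i.e. for app_rates [[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]] the parsed
# first rates are [0.34, 0.78, 2.34] and the max rates are [0.34, 3.54, 2.34]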
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
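# worked example for the first case: 0.34 (app_rate) * 0.34 (frac_act_ing) * 110 (food_multiplier) = 12.716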
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
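# these expected values are consistent with conc_0 * 0.5 ** (1. / foliar_diss_hlife),
# i.e. one day of first-order foliar dissipation (formula inferred from the expected results)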
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
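# e.g. 4.556 inches / 12 = 0.37966 feet (first expected value)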
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
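# worked example for the first case: 100 * (15 / 175) ** (1.15 - 1) ~= 69.176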
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
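# worked example for the first case: 0.648 * 15 ** 0.651 / (1 - 0.1) ~= 4.197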
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
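# worked example for the first case: ((0.34 * 0.15) / 128) * 8.33 * 10000 / 5 ~= 6.638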
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired other routines could be modified similarly
--comparing this method with 'test_ld50_rg_bird' it appears (for this test) that both run in the same time
--but I don't think this would be the case when 100's of model simulation runs are executed (and only a small
--number of the application_types apply to this method; thus I conclude we continue to use the non-vectorized
--approach -- should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = | pd.Series([0.34, 0.84, 0.02], dtype='float') | pandas.Series |
import os
import glob
import pandas as pd
classes = os.listdir(os.getcwd())
for classf in classes:
#if os.path.isfile(classf) or classf == 'LAST':
#continue
PWD = os.getcwd() + "/" + classf + "/"
currentdname = os.path.basename(os.getcwd())
csvfiles=glob.glob(PWD + "/*.csv")
df = pd.DataFrame(columns=['image', 'x', 'y', 'num'])
if os.path.exists(PWD + classf + "_" + currentdname + ".csv"):
print('csv file already exists.')
continue
for csvfile in csvfiles:
csvname = os.path.basename(csvfile)
df_each = | pd.read_csv(csvfile, index_col=0) | pandas.read_csv |
import requests
import pandas as pd
def sheet_to_df(access_token, sheet_id):
"""
Converts raw Smartsheet Sheet objects into a nice and tidy pandas DataFrame, just like mum used to make
For more detail, see: https://dataideas.blog/2018/11/13/loading-json-it-looks-simple-part-4/
:param access_token: str, required; Smartsheet api token
:param sheet_id: int, required; sheet ID
:return: pandas DataFrame of a Smartsheet sheet's contents
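Hypothetical example (the token and sheet ID are placeholders, not real values):
    df = sheet_to_df(access_token="<api-token>", sheet_id=1234567890123456)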
"""
api_prefix_url = 'https://api.smartsheet.com/2.0/sheets/' # base Smartsheet api url for Requests
url = api_prefix_url + str(sheet_id) # full url for requests
header = { # header for requests
'Authorization': 'Bearer ' + access_token,
'Content-Type': 'application/json',
'cache-control': 'no-cache'
}
r = requests.get(url, headers=header) # create requests Response object of sheet's json
sheet_dic = r.json() # convert json to a dictionary
col_list = []
for c in sheet_dic['columns']: # for all columns in the sheet dictionary
col_list.append(c['title']) # add title value to col_list
df = | pd.DataFrame(columns=col_list) | pandas.DataFrame |
# Author: <NAME>
# Python Version: 3.6
## Copyright 2019 <NAME>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
import requests
import sys
import traceback
import csv
import re, string
from enums import *
from hebtokenizer import HebTokenizer
class YapApi(object):
"""
Interface to Open University YAP (Yet another parser) https://github.com/OnlpLab/yap.
This class calls the Go-based server, and:
1. Wrap YAP in Python.
2. Add tokenizer. Credit: Prof' <NAME>.
3. Turn output CONLLU format to Dataframe & JSON.
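Illustrative usage (the IP/port is a placeholder for a locally running YAP server):
    yap = YapApi()
    tokenized, segmented, lemmas, dep_tree, md_lattice, ma_lattice = yap.run(text, '127.0.0.1:8000')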
"""
def __init__(self):
pass
def run(self, text:str, ip:str):
"""
text: the text to be parsed.
ip: YAP server IP, with port (default is 8000); if locally installed then 127.0.0.1:8000
"""
try:
print('Start Yap call')
# Keep alpha-numeric and punctuations only.
alnum_text=self.clean_text(text)
# Tokenize...
tokenized_text = HebTokenizer().tokenize(alnum_text)
tokenized_text = ' '.join([word for (part, word) in tokenized_text])
print("Tokens: {}".format(len(tokenized_text.split())))
self.init_data_items()
# Split to sentences for best performance.
text_arr=self.split_text_to_sentences(tokenized_text)
for i, sntnce_or_prgrph in enumerate( text_arr):
# Actual call to YAP server
rspns=self.call_yap(sntnce_or_prgrph, ip)
print('End Yap call {} /{}'.format( i ,len(text_arr)-1))
# Uncomment this code to print the results in CONLLU format
#conllu_dict=self.print_in_conllu_format(rspns)
# Turn CONLLU format to dataframe
_dep_tree, _md_lattice, _ma_lattice=self.conllu_format_to_dataframe(rspns)
_segmented_text= ' '.join( _dep_tree[yap_cols.word.name])
_lemmas=' '.join(_dep_tree[yap_cols.lemma.name])
self.append_prgrph_rslts(_dep_tree, _md_lattice, _ma_lattice, _segmented_text, _lemmas)
return tokenized_text, self.segmented_text, self.lemmas, self.dep_tree, self.md_lattice, self.ma_lattice
except Exception as err:
print( sys.exc_info()[0])
print( traceback.format_exc())
print( str(err))
print("Unexpected end of program")
def split_text_to_sentences(self, tokenized_text):
"""
YAP performs better when called sentence-by-sentence.
Also, dep_tree is limited to 256 nodes.
"""
max_len=150
arr=tokenized_text.strip().split()
sentences=[]
# Finding next sentence break.
while (True):
stop_points=[h for h in [i for i, e in enumerate(arr) if re.match(r"[!|.|?]",e)] ]
if len(stop_points)>0:
stop_point=min(stop_points)
# Keep several consecutive sentence breakers together as one break, like "...." or "???!!!"
while True:
stop_points.remove(stop_point)
if len(stop_points)>1 and min(stop_points)==(stop_point+1):
stop_point=stop_point+1
else:
break
# Case there is no sentence break, and this split > MAX LEN:
sntnc=arr[:stop_point+1]
if len(sntnc) >max_len:
while(len(sntnc) >max_len):
sentences.append(" ".join(sntnc[:140]))
sntnc=sntnc[140:]
sentences.append(" ".join(sntnc))
# Normal: sentence is less then 150 words...
else:
sentences.append(" ".join(arr[:stop_point+1] ))
arr=arr[stop_point+1:]
else:
break
if len(arr)>0:
sentences.append(" ".join(arr))
return sentences
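# Rough illustration of the intended behaviour (made-up tokens, not from the original docs):
#   split_text_to_sentences("w1 w2 . w3 w4 ? w5") -> ["w1 w2 .", "w3 w4 ?", "w5"]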
def clean_text(self, text:str):
text=text.replace('\n', ' ').replace('\r', ' ')
pattern= re.compile(r'[^א-ת\s.,!?a-zA-Z]')
alnum_text =pattern.sub(' ', text)
while(alnum_text.find(' ')>-1):
alnum_text=alnum_text.replace(' ', ' ')
return alnum_text
def init_data_items(self):
self.segmented_text=""
self.lemmas=""
self.dep_tree=pd.DataFrame()
self.md_lattice=pd.DataFrame()
self.ma_lattice=pd.DataFrame()
def append_prgrph_rslts(self, _dep_tree:pd.DataFrame, _md_lattice:pd.DataFrame, _ma_lattice:pd.DataFrame,
_segmented_text:str, _lemmas:str):
self.segmented_text="{} {}".format(self.segmented_text, _segmented_text).strip()
self.lemmas="{} {}".format(self.lemmas, _lemmas).strip()
self.dep_tree= | pd.concat([self.dep_tree, _dep_tree]) | pandas.concat |
import warnings
from pkg_resources import resource_filename
from tqdm import tqdm
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
from sklearn.externals import joblib
# import concise
from mmsplice.utils import logit, predict_deltaLogitPsi, \
predict_pathogenicity, predict_splicing_efficiency, encodeDNA, \
read_ref_psi_annotation, delta_logit_PSI_to_delta_PSI, \
mmsplice_ref_modules, mmsplice_alt_modules, df_batch_writer
from mmsplice.exon_dataloader import SeqSpliter
from mmsplice.mtsplice import MTSplice, tissue_names
from mmsplice.layers import GlobalAveragePooling1D_Mask0, ConvDNA
ACCEPTOR_INTRON = resource_filename('mmsplice', 'models/Intron3.h5')
DONOR = resource_filename('mmsplice', 'models/Donor.h5')
EXON = resource_filename('mmsplice', 'models/Exon.h5')
EXON3 = resource_filename('mmsplice', 'models/Exon_prime3.h5')
ACCEPTOR = resource_filename('mmsplice', 'models/Acceptor.h5')
DONOR_INTRON = resource_filename('mmsplice', 'models/Intron5.h5')
LINEAR_MODEL = joblib.load(resource_filename(
'mmsplice', 'models/linear_model.pkl'))
LOGISTIC_MODEL = joblib.load(resource_filename(
'mmsplice', 'models/Pathogenicity.pkl'))
EFFICIENCY_MODEL = joblib.load(resource_filename(
'mmsplice', 'models/splicing_efficiency.pkl'))
custom_objects = {
'ConvDNA': ConvDNA
}
class MMSplice(object):
"""
Load modules of mmsplice model, perform prediction on batch of dataloader.
Args:
acceptor_intronM: acceptor intron model,
score acceptor intron sequence.
acceptorM: acceptor splice site model. Score acceptor sequence
with 50bp from intron, 3bp from exon.
exonM: exon model, score exon sequence.
donorM: donor splice site model, score donor sequence
with 13bp in the intron, 5bp in the exon.
donor_intronM: donor intron model, score donor intron sequence.
"""
def __init__(self,
acceptor_intronM=ACCEPTOR_INTRON,
acceptorM=ACCEPTOR,
exonM=EXON,
donorM=DONOR,
donor_intronM=DONOR_INTRON,
seq_spliter=None,
deep=True):
self.spliter = seq_spliter or SeqSpliter()
self.acceptor_intronM = load_model(
acceptor_intronM, compile=False,
custom_objects=custom_objects)
self.acceptorM = load_model(acceptorM, compile=False,
custom_objects=custom_objects)
self.exonM = load_model(exonM, compile=False, custom_objects={
"GlobalAveragePooling1D_Mask0": GlobalAveragePooling1D_Mask0,
'ConvDNA': ConvDNA
})
self.donorM = load_model(donorM, compile=False,
custom_objects=custom_objects)
self.donor_intronM = load_model(donor_intronM, compile=False,
custom_objects=custom_objects)
self.deep = deep
def predict_on_batch(self, batch):
warnings.warn(
"`self.predict_on_batch` is deprecated,"
" use `self.predict_modular_scores_on_batch instead`",
DeprecationWarning
)
return self.predict_modular_scores_on_batch(batch)
def predict_modular_scores_on_batch(self, batch):
'''
Perform prediction on batch of dataloader.
Args:
batch: batch of dataloader.
Returns:
np.matrix of modular predictions
as [[acceptor_intronM, acceptor, exon, donor, donor_intron]]
'''
score = np.concatenate([
self.acceptor_intronM.predict(batch['acceptor_intron']),
logit(self.acceptorM.predict(batch['acceptor'])),
self.exonM.predict(batch['exon']),
logit(self.donorM.predict(batch['donor'])),
self.donor_intronM.predict(batch['donor_intron'])
], axis=1)
return score
def predict(self, *args, **kwargs):
warnings.warn(
"self.predict is deprecated, use self.predict_on_seq instead",
DeprecationWarning
)
return self.predict_on_seq(*args, **kwargs)
def predict_on_seq(self, seq, overhang=(100, 100)):
"""
Perform prediction on an overhanged exon sequence string.
Args:
seq (str): sequence of overhanged exon.
overhang (Tuple[int, int]): overhang of sequence.
Returns:
np.array of modular predictions
as [[acceptor_intronM, acceptor, exon, donor, donor_intron]].
"""
batch = self.spliter.split(seq, overhang)
batch = {k: encodeDNA([v]) for k, v in batch.items()}
return self.predict_modular_scores_on_batch(batch)[0]
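# Hypothetical usage sketch (the input sequence is a placeholder, shortened for illustration):
#   model = MMSplice()
#   scores = model.predict_on_seq(exon_seq_with_overhang, overhang=(100, 100))
#   # scores -> [acceptor_intron, acceptor, exon, donor, donor_intron]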
def _predict_batch(self, batch, optional_metadata=None):
optional_metadata = optional_metadata or []
X_ref = self.predict_modular_scores_on_batch(
batch['inputs']['seq'])
X_alt = self.predict_modular_scores_on_batch(
batch['inputs']['mut_seq'])
ref_pred = pd.DataFrame(X_ref, columns=mmsplice_ref_modules)
alt_pred = | pd.DataFrame(X_alt, columns=mmsplice_alt_modules) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
        # These numbers fall right inside the int64-uint64 range,
        # so they should be parsed as the matching integer dtype.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
| tm.assert_numpy_array_equal(df.values, expected) | pandas.util.testing.assert_numpy_array_equal |
#!/usr/bin/env python
# coding: utf-8
# # Data analyses with Python & Jupyter
# ## Introduction
#
# You can do complex biological data manipulation and analyses using the `pandas` python package (or by switching kernels, using `R`!)
#
# We will look at pandas here, which provides `R`-like functions for data manipulation and analyses. `pandas` is built on top of NumPy. Most importantly, it offers an R-like `DataFrame` object: a multidimensional array with explicit row and column names that can contain heterogeneous types of data as well as missing values, which would not be possible using numpy arrays.
#
# `pandas` also implements a number of powerful data operations for filtering, grouping and reshaping data similar to R or spreadsheet programs.
# ## Installing Pandas
#
# `pandas` requires NumPy. See the [Pandas documentation](http://pandas.pydata.org/).
# If you installed Anaconda, you already have Pandas installed. Otherwise, you can `sudo apt install` it.
#
# Assuming `pandas` is installed, you can import it and check the version:
# In[1]:
import pandas as pd
pd.__version__
# Also import scipy:
# In[4]:
import scipy as sc
# ### Reminder about tabbing and help!
#
# As you read through these chapters, don't forget that Jupyter gives you the ability to quickly explore the contents of a package or the methods applicable to an object by using the tab-completion feature. Also, documentation of various functions can be accessed using the ``?`` character. For example, to display all the contents of the pandas namespace, you can type
#
# ```ipython
# In [1]: pd.<TAB>
# ```
#
# And to display Pandas's built-in documentation, you can use this:
#
# ```ipython
# In [2]: pd?
# ```
# ## Pandas `dataframes`
#
# The dataframe is the main data object in pandas.
#
# ### importing data
# Dataframes can be created from multiple sources, e.g. CSV files, Excel files, and JSON.
# In[2]:
MyDF = | pd.read_csv('../data/testcsv.csv', sep=',') | pandas.read_csv |
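# As a quick illustration of the points above, a dataframe can also be built directly from a Python dictionary and then filtered or grouped. The species counts below are invented purely for this example (they are not part of the course data):

# In[ ]:

MyDF2 = pd.DataFrame({'Species': ['Quercus robur', 'Quercus robur', 'Fagus sylvatica', 'Betula pendula'],
                      'Count': [12, 5, 7, 3],
                      'Biomass_kg': [4.5, 1.9, 2.1, None]})  # None is stored as NaN (missing value)
MyDF2.dtypes  # heterogeneous column types in a single table

# In[ ]:

MyDF2[MyDF2['Count'] > 4]  # row filtering
MyDF2.groupby('Species')['Count'].sum()  # grouping and aggregation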
import os
import time
from warnings import simplefilter
simplefilter("ignore")
import glob
import codecs
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import json
from sklearn.externals import joblib
from matplotlib.gridspec import GridSpec
def refineBGInfo(bgdict):
#reorganizes api bg data
if bgdict["name"] != "Battlegrounds": return "Error, incorrect Data or Data Format received (refineBGInfo)"
else:
simpledict = {}
for item in bgdict["statistics"]: #putting relevant information into simple 1 dimensional dict
if "highest" in item.keys(): simpledict[item["name"]] = (item["highest"], str(item["quantity"]))
else: simpledict[item["name"]] = item["quantity"]
calctuple = calcStats(simpledict) # calculates battles/victories without EotS and estimates EotS stats
simpledict["Battlegrounds played (calculated)"], simpledict["Battlegrounds won (calculated)"] = calctuple[0]
simpledict["Eye of the Storm battles (estimated)"], simpledict["Eye of the Storm victories (estimated)"] = calctuple[1]
simpledict["Eye of the Storm battles deficit"], simpledict["Eye of the Storm victories deficit"] = calctuple[2]
if simpledict["Battleground played the most"] == 0: simpledict["Battleground played the most"] = ("None", "0")
if simpledict["Battleground won the most"] == 0: simpledict["Battleground won the most"] = ("None", "0")
cardsdict = {}
#reorganizing simpledict into dict with blocks of associated data ("cards")
cardsdict["General"] = {"Battlegrounds played": simpledict["Battlegrounds played"],
"Battlegrounds won": simpledict["Battlegrounds won"],
"Battleground played the most": simpledict["Battleground played the most"][0] + " (" + simpledict["Battleground played the most"][1] + ")",
"Battleground won the most": simpledict["Battleground won the most"][0] + " (" + simpledict ["Battleground won the most"][1] + ")",
"Battlegrounds played (calculated)": simpledict["Battlegrounds played (calculated)"],
"Battlegrounds won (calculated)": simpledict["Battlegrounds won (calculated)"]}
cardsdict["Alterac Valley"] = {"Alterac Valley battles": simpledict["Alterac Valley battles"],
"Alterac Valley victories": simpledict["Alterac Valley victories"],
"Alterac Valley towers defended": simpledict["Alterac Valley towers defended"],
"Alterac Valley towers captured": simpledict["Alterac Valley towers captured"]}
cardsdict["Arathi Basin"] = {"Arathi Basin battles": simpledict["Arathi Basin battles"],
"Arathi Basin victories": simpledict["Arathi Basin victories"]}
cardsdict["Battle for Gilneas"] = {"Battle for Gilneas battles": simpledict["Battle for Gilneas battles"],
"Battle for Gilneas victories": simpledict["Battle for Gilneas victories"]}
cardsdict["Eye of the Storm"] = {"Eye of the Storm battles": simpledict["Eye of the Storm battles"],
"Eye of the Storm victories": simpledict["Eye of the Storm victories"],
"Eye of the Storm flags captured": simpledict["Eye of the Storm flags captured"],
"Eye of the Storm battles (estimated)": simpledict["Eye of the Storm battles (estimated)"],
"Eye of the Storm victories (estimated)": simpledict["Eye of the Storm victories (estimated)"],
"Eye of the Storm battles deficit": simpledict["Eye of the Storm battles deficit"],
"Eye of the Storm victories deficit": simpledict["Eye of the Storm victories deficit"]}
cardsdict["Seething Shore"] = {"Seething Shore battles": simpledict["Seething Shore battles"],
"Seething Shore victories": simpledict["Seething Shore victories"]}
cardsdict["Strand of the Ancients"] = {"Strand of the Ancients battles": simpledict["Strand of the Ancients battles"],
"Strand of the Ancients victories": simpledict["Strand of the Ancients victories"]}
cardsdict["Twin Peaks"] = {"Twin Peaks battles": simpledict["Twin Peaks battles"],
"Twin Peaks victories": simpledict["Twin Peaks victories"],
"Twin Peaks flags captured": simpledict["Twin Peaks flags captured"],
"Twin Peaks flags returned": simpledict["Twin Peaks flags returned"]}
cardsdict["Warsong Gulch"] = {"Warsong Gulch battles": simpledict["Warsong Gulch battles"],
"Warsong Gulch victories": simpledict["Warsong Gulch victories"],
"Warsong Gulch flags captured": simpledict["Warsong Gulch flags captured"],
"Warsong Gulch flags returned": simpledict["Warsong Gulch flags returned"]}
cardsdict["Silvershard Mines"] = {"Silvershard Mines battles": simpledict["Silvershard Mines battles"],
"Silvershard Mines victories": simpledict["Silvershard Mines victories"]}
cardsdict["Temple of Kotmogu"] = {"Temple of Kotmogu battles": simpledict["Temple of Kotmogu battles"],
"Temple of Kotmogu victories": simpledict["Temple of Kotmogu victories"]}
cardsdict["Isle of Conquest"] = {"Isle of Conquest battles": simpledict["Isle of Conquest battles"],
"Isle of Conquest victories": simpledict["Isle of Conquest victories"]}
cardsdict["Deepwind Gorge"] = {"Deepwind Gorge battles": simpledict["Deepwind Gorge battles"],
"Deepwind Gorge victories": simpledict["Deepwind Gorge victories"]}
return cardsdict
def refineBGInfoforDB(bgdict):
#reorganizes api bg data
if bgdict["name"] != "Battlegrounds": return "Error, incorrect Data or Data Format received (refineBGInfo)"
else:
simpledict = {}
for item in bgdict["statistics"]: #putting relevant information into simple 1 dimensional dict
if "highest" in item.keys(): simpledict[item["name"]] = (item["highest"], str(item["quantity"]))
else: simpledict[item["name"]] = item["quantity"]
calctuple = calcStats(simpledict) # calculates battles/victories without EotS and estimates EotS stats
simpledict["Battlegrounds played (calculated)"], simpledict["Battlegrounds won (calculated)"] = calctuple[0]
simpledict["Eye of the Storm battles (estimated)"], simpledict["Eye of the Storm victories (estimated)"] = calctuple[1]
simpledict["Eye of the Storm battles deficit"], simpledict["Eye of the Storm victories deficit"] = calctuple[2]
if simpledict["Battleground played the most"] == 0: simpledict["Battleground played the most"] = ("None", 0)
if simpledict["Battleground won the most"] == 0: simpledict["Battleground won the most"] = ("None", 0)
dbdict = {"BG_played": simpledict["Battlegrounds played"], "BG_won": simpledict["Battlegrounds won"],
"played_most_n": simpledict["Battleground played the most"][0], "played_most_c":simpledict["Battleground played the most"][1],
"won_most_n":simpledict["Battleground won the most"][0], "won_most_c":simpledict["Battleground won the most"][1],
"BG_played_c": simpledict["Battlegrounds played (calculated)"], "BG_won_c": simpledict["Battlegrounds won (calculated)"],
"AV_played": simpledict["Alterac Valley battles"], "AV_won": simpledict["Alterac Valley victories"],
"AV_tower_def": simpledict["Alterac Valley towers defended"], "AV_tower_cap": simpledict["Alterac Valley towers captured"],
"AB_played": simpledict["Arathi Basin battles"], "AB_won": simpledict["Arathi Basin victories"],
"Gil_played": simpledict["Battle for Gilneas battles"], "Gil_won": simpledict["Battle for Gilneas victories"],
"EotS_played": simpledict["Eye of the Storm battles"], "EotS_won": simpledict["Eye of the Storm victories"],
"EotS_flags": simpledict["Eye of the Storm flags captured"], "EotS_played_est": simpledict["Eye of the Storm battles (estimated)"],
"EotS_won_est": simpledict["Eye of the Storm victories (estimated)"], "EotS_played_def": simpledict["Eye of the Storm battles deficit"],
"EotS_won_def": simpledict["Eye of the Storm victories deficit"],
"SS_played": simpledict["Seething Shore battles"], "SS_won": simpledict["Seething Shore victories"],
"SotA_played": simpledict["Strand of the Ancients battles"], "SotA_won": simpledict["Strand of the Ancients victories"],
"TP_played": simpledict["Twin Peaks battles"], "TP_won": simpledict["Twin Peaks victories"],
"TP_flags_cap": simpledict["Twin Peaks flags captured"], "TP_flags_ret": simpledict["Twin Peaks flags returned"],
"WS_played": simpledict["Warsong Gulch battles"], "WS_won": simpledict["Warsong Gulch victories"],
"WS_flags_cap": simpledict["Warsong Gulch flags captured"], "WS_flags_ret": simpledict["Warsong Gulch flags returned"],
"SM_played": simpledict["Silvershard Mines battles"], "SM_won": simpledict["Silvershard Mines victories"],
"TK_played": simpledict["Temple of Kotmogu battles"], "TK_won": simpledict["Temple of Kotmogu victories"],
"IoC_played": simpledict["Isle of Conquest battles"], "IoC_won": simpledict["Isle of Conquest victories"],
"DG_played": simpledict["Deepwind Gorge battles"], "DG_won": simpledict["Deepwind Gorge victories"]}
return dbdict
def createBGCharts(char, realm, cardsdict): #deprecated test function, use createMoreBGCharts(char, realm, cardsdict) instead
filename = "figures/" + char + "_" + realm + ".jpg"
totalGames = cardsdict["General"]["Battlegrounds played"]
totalWins = cardsdict["General"]["Battlegrounds won"]
colors = ["lightgreen", "lightcoral"]
sizes = [totalWins, totalGames - totalWins]
explode = (0.0,0)
fig = plt.figure()
plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False)
plt.axis("equal")
plt.title("Total Battlegrounds")
plt.legend(["Win","Loss"])
fig.savefig("static/" + filename)
plt.close(fig)
#filepath = os.path.abspath(filename)
return filename
def createMoreBGCharts(char, realm, cardsdict):
    #creates pie charts from bg data (except Eye of the Storm -> faulty data from api)
filename = "figures/" + char + "_" + realm + "_.svg"
totalGames = cardsdict["General"]["Battlegrounds played"]
totalWins = cardsdict["General"]["Battlegrounds won"]
colors = ["lightgreen", "lightcoral"]
sizes = [totalWins, totalGames - totalWins]
explode = (0.0,0)
fig = plt.figure(figsize=(7,15), frameon=False)
gs1 = GridSpec(6,3)
plt.subplot(gs1[0:2, 0:3])
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Total Battlegrounds", y=1, fontsize=20)
plt.legend(["Win","Loss"])
plt.subplot(gs1[2, 0])
sizes = [cardsdict["Alterac Valley"]["Alterac Valley victories"],cardsdict["Alterac Valley"]["Alterac Valley battles"] - cardsdict["Alterac Valley"]["Alterac Valley victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False )
    else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Alterac Valley", y=0.95)
plt.subplot(gs1[2, 1])
sizes = [cardsdict["Arathi Basin"]["Arathi Basin victories"],cardsdict["Arathi Basin"]["Arathi Basin battles"] - cardsdict["Arathi Basin"]["Arathi Basin victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Arathi Basin", y=0.95)
plt.subplot(gs1[2,2])
sizes = [cardsdict["Battle for Gilneas"]["Battle for Gilneas victories"],cardsdict["Battle for Gilneas"]["Battle for Gilneas battles"] - cardsdict["Battle for Gilneas"]["Battle for Gilneas victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Battle for Gilneas", y=0.95)
plt.subplot(gs1[3,0])
sizes = [cardsdict["Seething Shore"]["Seething Shore victories"],cardsdict["Seething Shore"]["Seething Shore battles"] - cardsdict["Seething Shore"]["Seething Shore victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Seething Shore", y=0.95)
plt.subplot(gs1[3,1])
sizes = [cardsdict["Twin Peaks"]["Twin Peaks victories"],cardsdict["Twin Peaks"]["Twin Peaks battles"] - cardsdict["Twin Peaks"]["Twin Peaks victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Twin Peaks", y=0.95)
plt.subplot(gs1[3,2])
sizes = [cardsdict["Warsong Gulch"]["Warsong Gulch victories"],cardsdict["Warsong Gulch"]["Warsong Gulch battles"] - cardsdict["Warsong Gulch"]["Warsong Gulch victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Warsong Gulch", y=0.95)
plt.subplot(gs1[4,0])
sizes = [cardsdict["Silvershard Mines"]["Silvershard Mines victories"],cardsdict["Silvershard Mines"]["Silvershard Mines battles"] - cardsdict["Silvershard Mines"]["Silvershard Mines victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Silvershard Mines", y=0.95)
plt.subplot(gs1[4,1])
sizes = [cardsdict["Temple of Kotmogu"]["Temple of Kotmogu victories"],cardsdict["Temple of Kotmogu"]["Temple of Kotmogu battles"] - cardsdict["Temple of Kotmogu"]["Temple of Kotmogu victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Temple of Kotmogu", y=0.95)
plt.subplot(gs1[4,2])
sizes = [cardsdict["Isle of Conquest"]["Isle of Conquest victories"],cardsdict["Isle of Conquest"]["Isle of Conquest battles"] - cardsdict["Isle of Conquest"]["Isle of Conquest victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Isle of Conquest", y=0.95)
plt.subplot(gs1[5,0])
sizes = [cardsdict["Deepwind Gorge"]["Deepwind Gorge victories"],cardsdict["Deepwind Gorge"]["Deepwind Gorge battles"] - cardsdict["Deepwind Gorge"]["Deepwind Gorge victories"]]
if sizes[1] + sizes[0] == 0: plt.pie([1], shadow=False, startangle=45, colors=["grey"], counterclock=False)
else: plt.pie(sizes, shadow=False, autopct=lambda p: "{:.2f}% ({:,.0f})".format(p,p * sum(sizes)/100), startangle=45, colors=colors, counterclock=False )
plt.axis("equal")
plt.title("Deepwind Gorge", y=0.95)
# plt.show()
fig.savefig("static/" + filename, bbox_inches='tight', pad_inches=0)
#filepath = os.path.abspath(filename)
return filename
def calcStats(simpledict): # calculates stats by aggregating stats for each bg without EotS
matchlist = []
victorieslist = []
for key in simpledict.keys():
if "battles" in key and "Rated" not in key: matchlist.append(simpledict[key])
if "victories" in key and "Rated" not in key: victorieslist.append(simpledict[key])
matches = sum(matchlist) - simpledict["Eye of the Storm battles"]
victories = sum(victorieslist) - simpledict["Eye of the Storm victories"]
ceotsbattles = simpledict["Battlegrounds played"] - matches
ceotsvictories = simpledict["Battlegrounds won"] - victories
matchdeficit = ceotsbattles - simpledict["Eye of the Storm battles"]
victoriesdeficit = ceotsvictories - simpledict["Eye of the Storm victories"]
return ((matches, victories), (ceotsbattles, ceotsvictories), (matchdeficit, victoriesdeficit))
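# Illustrative (made-up) numbers for calcStats above: if the per-battleground keys sum to
# 10 battles and 6 victories once Eye of the Storm is excluded, while the account-wide totals
# report 15 battlegrounds played and 9 won, then the estimated Eye of the Storm figures are
# 15 - 10 = 5 battles and 9 - 6 = 3 victories. The "deficit" values are those estimates minus
# whatever the (often faulty) Eye of the Storm statistics reported by the API claim.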
def refineCharInfo(infodict):
raceList = {1:"Human", 2:"Orc", 3:"Dwarf", 4:"Night Elf", 5:"Undead", 6:"Tauren", 7:"Gnome", 8:"Troll", 9:"Goblin", 10:"Blood Elf", 11:"Draenei",
22:"Worgen", 24:"Pandaren", 25:"Pandaren", 26:"Pandaren", 27:"Nightborne", 28:"Highmountain Tauren", 29:"Void Elf", 30:"Lightforged Draeenei",
34:"Dark Iron Dwarf", 36:"Mag'har Orc"}
factionList = ["Alliance", "Horde", "Neutral"]
classList = ["Warrior", "Paladin", "Hunter", "Rogue", "Priest", "Death Knight", "Shaman", "Mage", "Warlock", "Monk", "Druid", "Demon Hunter"]
genderList = ["Male", "Female"]
infodict["avatar"] = "https://render-eu.worldofwarcraft.com/character/{}".format(infodict["thumbnail"])
infodict["class"] = classList[infodict["class"] - 1]
infodict["gender"] = genderList[infodict["gender"]]
infodict["faction"] = factionList[infodict["faction"]]
infodict["race"] = raceList.get(infodict["race"], "Unknown")
return infodict
def refineCharandRealm(char, realm):
char = char.lower()
realm = realm.lower()
return (char, realm)
def createBGDF(charlist, bglist):
df_char = | pd.DataFrame(charlist) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import division
from functools import wraps
import numpy as np
from pandas import DataFrame, Series
#from pandas.stats import moments
import pandas as pd
def simple_moving_average(prices, period=26):
"""
    :param prices: sequence of prices (list, numpy array or pandas Series)
    :param period: number of periods for calculating the SMA
    :return: a numpy array of SMA values
"""
weights = np.repeat(1.0, period) / period
sma = np.convolve(prices, weights, 'valid')
return sma
def stochastic_oscillator_k(df):
"""Calculate stochastic oscillator %K for given data.
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
SOk = pd.Series((df['close'] - df['low']) / (df['high'] - df['low']), name='SO%k')
df = df.join(SOk)
return df
def stochastic_oscillator_d(df, n):
"""Calculate stochastic oscillator %D for given data.
:param df: pandas.DataFrame
    :param n: smoothing window (span) used for the %D line
:return: pandas.DataFrame
"""
SOk = pd.Series((df['close'] - df['low']) / (df['high'] - df['low']), name='SO%k')
SOd = pd.Series(SOk.ewm(span=n, min_periods=n).mean(), name='SO%d')
df = df.join(SOd)
return df
def bollinger_bands(df, n, std, add_ave=True):
"""
:param df: pandas.DataFrame
    :param n: rolling window length
    :param std: number of standard deviations for the upper/lower bands
    :param add_ave: if True, also include the rolling average column
:return: pandas.DataFrame
"""
ave = df['close'].rolling(window=n, center=False).mean()
sd = df['close'].rolling(window=n, center=False).std()
upband = pd.Series(ave + (sd * std), name='bband_upper_' + str(n))
dnband = pd.Series(ave - (sd * std), name='bband_lower_' + str(n))
if add_ave:
ave = pd.Series(ave, name='bband_ave_' + str(n))
df = df.join(pd.concat([upband, dnband, ave], axis=1))
else:
df = df.join(pd.concat([upband, dnband], axis=1))
return df
def money_flow_index(df, n):
"""Calculate Money Flow Index and Ratio for given data.
:param df: pandas.DataFrame
    :param n: rolling window length
:return: pandas.DataFrame
"""
PP = (df['high'] + df['low'] + df['close']) / 3
i = 0
PosMF = [0]
while i < df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.loc[i + 1, 'volume'])
else:
PosMF.append(0)
i = i + 1
PosMF = pd.Series(PosMF)
TotMF = PP * df['volume']
MFR = pd.Series(PosMF / TotMF)
MFI = pd.Series(MFR.rolling(n, min_periods=n).mean())
# df = df.join(MFI)
return MFI
def series_indicator(col):
def inner_series_indicator(f):
@wraps(f)
def wrapper(s, *args, **kwargs):
if isinstance(s, DataFrame):
s = s[col]
return f(s, *args, **kwargs)
return wrapper
return inner_series_indicator
def _wilder_sum(s, n):
s = s.dropna()
nf = (n - 1) / n
ws = [np.nan] * (n - 1) + [s[n - 1] + nf * sum(s[:n - 1])]
for v in s[n:]:
ws.append(v + ws[-1] * nf)
return Series(ws, index=s.index)
@series_indicator('high')
def hhv(s, n):
return pd.rolling_max(s, n)
@series_indicator('low')
def llv(s, n):
return | pd.rolling_min(s, n) | pandas.rolling_min |
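# Minimal usage sketch for the indicator helpers above. The OHLCV numbers below are made up
# and the lowercase column names simply mirror what these functions expect; hhv/llv are not
# called here because they rely on the older pd.rolling_max/pd.rolling_min API.
if __name__ == "__main__":
    demo = pd.DataFrame({
        'high': [10.2, 10.6, 10.9, 11.4, 11.2, 11.8, 12.1, 12.0, 12.5, 12.7],
        'low': [9.8, 10.0, 10.3, 10.7, 10.6, 11.0, 11.4, 11.3, 11.8, 12.0],
        'close': [10.0, 10.4, 10.7, 11.1, 10.9, 11.5, 11.9, 11.7, 12.2, 12.4],
        'volume': [1000, 1200, 900, 1500, 1100, 1300, 1250, 980, 1400, 1600],
    })
    demo = bollinger_bands(demo, n=5, std=2)   # adds bband_upper_5 / bband_lower_5 / bband_ave_5
    demo = stochastic_oscillator_d(demo, n=3)  # adds the smoothed SO%d column
    print(demo.tail())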
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def df_checks():
"""fixture dataframe"""
return pd.DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
@pytest.fixture
def df_multi():
"""MultiIndex dataframe fixture."""
return pd.DataFrame(
{
("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"},
("names", "aa"): {0: 67, 1: 80, 2: 64},
("more_names", "aaa"): {0: 56, 1: 90, 2: 50},
}
)
def test_column_level_wrong_type(df_multi):
"""Raise TypeError if wrong type is provided for column_level."""
with pytest.raises(TypeError):
df_multi.pivot_longer(index="name", column_level={0})
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_index(df_checks):
"""Raise TypeError if wrong type is provided for the index."""
with pytest.raises(TypeError):
df_checks.pivot_longer(index=2007)
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_column_names(df_checks):
"""Raise TypeError if wrong type is provided for column_names."""
with pytest.raises(TypeError):
df_checks.pivot_longer(column_names=2007)
def test_type_names_to(df_checks):
"""Raise TypeError if wrong type is provided for names_to."""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to={2007})
def test_subtype_names_to(df_checks):
"""
Raise TypeError if names_to is a sequence
and the wrong type is provided for entries
in names_to.
"""
with pytest.raises(TypeError, match="1 in names_to.+"):
df_checks.pivot_longer(names_to=[1])
def test_duplicate_names_to(df_checks):
"""Raise error if names_to contains duplicates."""
with pytest.raises(ValueError, match="y is duplicated in names_to."):
df_checks.pivot_longer(names_to=["y", "y"], names_pattern="(.+)(.)")
def test_both_names_sep_and_pattern(df_checks):
"""
Raise ValueError if both names_sep
and names_pattern is provided.
"""
with pytest.raises(
ValueError,
match="Only one of names_pattern or names_sep should be provided.",
):
df_checks.pivot_longer(
names_to=["rar", "bar"], names_sep="-", names_pattern="(.+)(.)"
)
def test_name_pattern_wrong_type(df_checks):
"""Raise TypeError if the wrong type provided for names_pattern."""
with pytest.raises(TypeError, match="names_pattern should be one of.+"):
df_checks.pivot_longer(names_to=["rar", "bar"], names_pattern=2007)
def test_name_pattern_no_names_to(df_checks):
"""Raise ValueError if names_pattern and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_pattern="(.+)(.)")
def test_name_pattern_groups_len(df_checks):
"""
Raise ValueError if names_pattern
and the number of groups
differs from the length of names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of groups in names_pattern.+",
):
df_checks.pivot_longer(names_to=".value", names_pattern="(.+)(.)")
def test_names_pattern_wrong_subtype(df_checks):
"""
Raise TypeError if names_pattern is a list/tuple
and wrong subtype is supplied.
"""
with pytest.raises(TypeError, match="1 in names_pattern.+"):
df_checks.pivot_longer(
names_to=["ht", "num"], names_pattern=[1, "\\d"]
)
def test_names_pattern_names_to_unequal_length(df_checks):
"""
Raise ValueError if names_pattern is a list/tuple
and wrong number of items in names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
names_to=["variable"], names_pattern=["^ht", ".+i.+"]
)
def test_names_pattern_names_to_dot_value(df_checks):
"""
Raise Error if names_pattern is a list/tuple and
.value in names_to.
"""
with pytest.raises(
ValueError,
match=".value is not accepted in names_to "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(
names_to=["variable", ".value"], names_pattern=["^ht", ".+i.+"]
)
def test_name_sep_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for names_sep."""
with pytest.raises(TypeError, match="names_sep should be one of.+"):
df_checks.pivot_longer(names_to=[".value", "num"], names_sep=["_"])
def test_name_sep_no_names_to(df_checks):
"""Raise ValueError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
def test_values_to_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for `values_to`."""
with pytest.raises(TypeError, match="values_to should be one of.+"):
df_checks.pivot_longer(values_to={"salvo"})
def test_values_to_wrong_type_names_pattern(df_checks):
"""
Raise TypeError if `values_to` is a list,
and names_pattern is not.
"""
with pytest.raises(
TypeError,
match="values_to can be a list/tuple only "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(values_to=["salvo"])
def test_values_to_names_pattern_unequal_length(df_checks):
"""
Raise ValueError if `values_to` is a list,
and the length of names_pattern
does not match the length of values_to.
"""
with pytest.raises(
ValueError,
match="The length of values_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
values_to=["salvo"],
names_pattern=["ht", r"\d"],
names_to=["foo", "bar"],
)
def test_values_to_names_seq_names_to(df_checks):
"""
Raise ValueError if `values_to` is a list,
and intersects with names_to.
"""
with pytest.raises(
ValueError, match="salvo in values_to already exists in names_to."
):
df_checks.pivot_longer(
values_to=["salvo"], names_pattern=["ht"], names_to="salvo"
)
def test_sub_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains non strings."""
with pytest.raises(TypeError, match="1 in values_to.+"):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=[1, "salvo"],
)
def test_duplicate_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains duplicates."""
with pytest.raises(ValueError, match="salvo is duplicated in values_to."):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=["salvo", "salvo"],
)
def test_values_to_exists_in_columns(df_checks):
"""
Raise ValueError if values_to already
exists in the dataframe's columns.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(index="birth", values_to="birth")
def test_values_to_exists_in_names_to(df_checks):
"""
Raise ValueError if values_to is in names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="num", names_to="num")
def test_column_multiindex_names_sep(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_sep is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
column_names=[("names", "aa")],
names_sep="_",
names_to=["names", "others"],
)
def test_column_multiindex_names_pattern(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_pattern is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_pattern=r"(.+)(.+)",
names_to=["names", "others"],
)
def test_index_tuple_multiindex(df_multi):
"""
Raise ValueError if index is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index=("name", "a"))
def test_column_names_tuple_multiindex(df_multi):
"""
Raise ValueError if column_names is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(column_names=("names", "aa"))
def test_sort_by_appearance(df_checks):
"""Raise error if sort_by_appearance is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"],
names_sep="_",
sort_by_appearance="TRUE",
)
def test_ignore_index(df_checks):
"""Raise error if ignore_index is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"], names_sep="_", ignore_index="TRUE"
)
def test_names_to_index(df_checks):
"""
Raise ValueError if there is no names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to="famid",
index="famid",
)
def test_names_sep_pattern_names_to_index(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to=["dim", "famid"],
names_sep="_",
index="famid",
)
def test_dot_value_names_to_columns_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist in the new dataframe\'s columns.+",
):
df_checks.pivot_longer(
index="famid", names_to=(".value", "ht"), names_pattern="(.+)(.)"
)
def test_values_to_seq_index_intersect(df_checks):
"""
Raise ValueError if values_to is a sequence,
and intersects with the index
"""
match = ".+values_to already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(ValueError, match=rf"{match}"):
df_checks.pivot_longer(
index="famid",
names_to=("value", "ht"),
names_pattern=["ht", r"\d"],
values_to=("famid", "foo"),
)
def test_dot_value_names_to_index_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the index
"""
match = ".+already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(
ValueError,
match=rf"{match}",
):
df_checks.rename(columns={"famid": "ht"}).pivot_longer(
index="ht", names_to=(".value", "num"), names_pattern="(.+)(.)"
)
def test_names_pattern_list_empty_any(df_checks):
"""
Raise ValueError if names_pattern is a list,
and not all matches are returned.
"""
with pytest.raises(
ValueError, match="No match was returned for the regex.+"
):
df_checks.pivot_longer(
index=["famid", "birth"],
names_to=["ht"],
names_pattern=["rar"],
)
def test_names_pattern_no_match(df_checks):
"""Raise error if names_pattern is a regex and returns no matches."""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(rar)(.)",
)
def test_names_pattern_incomplete_match(df_checks):
"""
Raise error if names_pattern is a regex
and returns incomplete matches.
"""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(ht)(.)",
)
def test_names_sep_len(df_checks):
"""
Raise error if names_sep,
and the number of matches returned
is not equal to the length of names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=".value", names_sep="(\\d)")
def test_pivot_index_only(df_checks):
"""Test output if only index is passed."""
result = df_checks.pivot_longer(
index=["famid", "birth"],
names_to="dim",
values_to="num",
)
actual = df_checks.melt(
["famid", "birth"], var_name="dim", value_name="num"
)
assert_frame_equal(result, actual)
def test_pivot_column_only(df_checks):
"""Test output if only column_names is passed."""
result = df_checks.pivot_longer(
column_names=["ht1", "ht2"],
names_to="dim",
values_to="num",
ignore_index=False,
)
actual = df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
assert_frame_equal(result, actual)
def test_pivot_sort_by_appearance(df_checks):
"""Test output if sort_by_appearance is True."""
result = df_checks.pivot_longer(
column_names="ht*",
names_to="dim",
values_to="num",
sort_by_appearance=True,
)
actual = (
df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
.sort_index()
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_pat_str(df_checks):
"""
Test output when names_pattern is a string,
and .value is present.
"""
result = (
df_checks.pivot_longer(
column_names="ht*",
names_to=(".value", "age"),
names_pattern="(.+)(.)",
sort_by_appearance=True,
)
.reindex(columns=["famid", "birth", "age", "ht"])
.astype({"age": int})
)
actual = pd.wide_to_long(
df_checks, stubnames="ht", i=["famid", "birth"], j="age"
).reset_index()
assert_frame_equal(result, actual)
def test_multiindex_column_level(df_multi):
"""
Test output from MultiIndex column,
when column_level is provided.
"""
result = df_multi.pivot_longer(
index="name", column_names="names", column_level=0
)
expected_output = df_multi.melt(
id_vars="name", value_vars="names", col_level=0
)
assert_frame_equal(result, expected_output)
def test_multiindex(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
and there is no names_sep/names_pattern.
"""
result = df_multi.pivot_longer(index=[("name", "a")])
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
there is no names_sep/names_pattern,
and names_to is provided as a sequence.
"""
result = df_multi.pivot_longer(
index=[("name", "a")], names_to=["variable_0", "variable_1"]
)
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to_length_mismatch(df_multi):
"""
Raise error if the length of names_to does not
match the number of column levels.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_to=["variable_0", "variable_1", "variable_2"],
)
def test_multiindex_incomplete_level_names(df_multi):
"""
Raise error if not all the levels have names.
"""
with pytest.raises(ValueError):
df_multi.columns.names = [None, "a"]
df_multi.pivot_longer(index=[("name", "a")])
def test_multiindex_index_level_names_intersection(df_multi):
"""
Raise error if level names exist in index.
"""
with pytest.raises(ValueError):
df_multi.columns.names = [None, "a"]
df_multi.pivot_longer(index=[("name", "a")])
def test_no_column_names(df_checks):
"""
Test output if all the columns
are assigned to the index parameter.
"""
assert_frame_equal(
df_checks.pivot_longer(df_checks.columns).rename_axis(columns=None),
df_checks,
)
@pytest.fixture
def test_df():
"""Fixture DataFrame"""
return pd.DataFrame(
{
"off_loc": ["A", "B", "C", "D", "E", "F"],
"pt_loc": ["G", "H", "I", "J", "K", "L"],
"pt_lat": [
100.07548220000001,
75.191326,
122.65134479999999,
124.13553329999999,
124.13553329999999,
124.01028909999998,
],
"off_lat": [
121.271083,
75.93845266,
135.043791,
134.51128400000002,
134.484374,
137.962195,
],
"pt_long": [
4.472089953,
-144.387785,
-40.45611048,
-46.07156181,
-46.07156181,
-46.01594293,
],
"off_long": [
-7.188632000000001,
-143.2288569,
21.242563,
40.937416999999996,
40.78472,
22.905889000000002,
],
}
)
def test_names_pattern_str(test_df):
"""Test output for names_pattern and .value."""
result = test_df.pivot_longer(
column_names="*_*",
names_to=["set", ".value"],
names_pattern="(.+)_(.+)",
sort_by_appearance=True,
)
actual = test_df.copy()
actual.columns = actual.columns.str.split("_").str[::-1].str.join("_")
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["loc", "lat", "long"],
sep="_",
i="index",
j="set",
suffix=r".+",
)
.reset_index("set")
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_sep(test_df):
"""Test output for names_sep and .value."""
result = test_df.pivot_longer(
names_to=["set", ".value"], names_sep="_", sort_by_appearance=True
)
actual = test_df.copy()
actual.columns = actual.columns.str.split("_").str[::-1].str.join("_")
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["loc", "lat", "long"],
sep="_",
i="index",
j="set",
suffix=".+",
)
.reset_index("set")
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_pattern_list():
"""Test output for names_pattern if list/tuple."""
df = pd.DataFrame(
{
"Activity": ["P1", "P2"],
"General": ["AA", "BB"],
"m1": ["A1", "B1"],
"t1": ["TA1", "TB1"],
"m2": ["A2", "B2"],
"t2": ["TA2", "TB2"],
"m3": ["A3", "B3"],
"t3": ["TA3", "TB3"],
}
)
result = df.pivot_longer(
index=["Activity", "General"],
names_pattern=["^m", "^t"],
names_to=["M", "Task"],
sort_by_appearance=True,
).loc[:, ["Activity", "General", "Task", "M"]]
actual = (
pd.wide_to_long(
df, i=["Activity", "General"], stubnames=["t", "m"], j="number"
)
.set_axis(["Task", "M"], axis="columns")
.droplevel(-1)
.reset_index()
)
assert_frame_equal(result, actual)
@pytest.fixture
def not_dot_value():
"""Fixture DataFrame"""
return pd.DataFrame(
{
"country": ["United States", "Russia", "China"],
"vault_2012": [48.1, 46.4, 44.3],
"floor_2012": [45.4, 41.6, 40.8],
"vault_2016": [46.9, 45.7, 44.3],
"floor_2016": [46.0, 42.0, 42.1],
}
)
def test_not_dot_value_sep(not_dot_value):
"""Test output when names_sep and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to=("event", "year"),
names_sep="_",
values_to="score",
sort_by_appearance=True,
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = not_dot_value.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_not_dot_value_sep2(not_dot_value):
"""Test output when names_sep and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to="event",
names_sep="/",
values_to="score",
)
actual = not_dot_value.melt(
"country", var_name="event", value_name="score"
)
assert_frame_equal(result, actual)
def test_not_dot_value_pattern(not_dot_value):
"""Test output when names_pattern is a string and no dot_value"""
result = not_dot_value.pivot_longer(
"country",
names_to=("event", "year"),
names_pattern=r"(.+)_(.+)",
values_to="score",
sort_by_appearance=True,
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = not_dot_value.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_not_dot_value_sep_single_column(not_dot_value):
"""
Test output when names_sep and no dot_value
for a single column.
"""
A = not_dot_value.loc[:, ["country", "vault_2012"]]
result = A.pivot_longer(
"country",
names_to=("event", "year"),
names_sep="_",
values_to="score",
)
result = result.sort_values(
["country", "event", "year"], ignore_index=True
)
actual = A.set_index("country")
actual.columns = actual.columns.str.split("_", expand=True)
actual.columns.names = ["event", "year"]
actual = (
actual.stack(["event", "year"])
.rename("score")
.sort_index()
.reset_index()
)
assert_frame_equal(result, actual)
def test_multiple_dot_value():
"""Test output for multiple .value."""
df = pd.DataFrame(
{
"x_1_mean": [1, 2, 3, 4],
"x_2_mean": [1, 1, 0, 0],
"x_1_sd": [0, 1, 1, 1],
"x_2_sd": [0.739, 0.219, 1.46, 0.918],
"y_1_mean": [1, 2, 3, 4],
"y_2_mean": [1, 1, 0, 0],
"y_1_sd": [0, 1, 1, 1],
"y_2_sd": [-0.525, 0.623, -0.705, 0.662],
"unit": [1, 2, 3, 4],
}
)
result = df.pivot_longer(
index="unit",
names_to=(".value", "time", ".value"),
names_pattern=r"(x|y)_([0-9])(_mean|_sd)",
).astype({"time": int})
actual = df.set_index("unit")
cols = [ent.split("_") for ent in actual.columns]
actual.columns = [f"{start}_{end}{middle}" for start, middle, end in cols]
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["x_mean", "y_mean", "x_sd", "y_sd"],
i="unit",
j="time",
)
.sort_index(axis=1)
.reset_index()
)
assert_frame_equal(result, actual)
@pytest.fixture
def single_val():
"""fixture dataframe"""
return pd.DataFrame(
{
"id": [1, 2, 3],
"x1": [4, 5, 6],
"x2": [5, 6, 7],
}
)
def test_multiple_dot_value2(single_val):
"""Test output for multiple .value."""
result = single_val.pivot_longer(
index="id", names_to=(".value", ".value"), names_pattern="(.)(.)"
)
assert_frame_equal(result, single_val)
def test_names_pattern_sequence_single_unique_column(single_val):
"""
Test output if names_pattern is a sequence of length 1.
"""
result = single_val.pivot_longer(
"id", names_to=["x"], names_pattern=("x",)
)
actual = (
pd.wide_to_long(single_val, ["x"], i="id", j="num")
.droplevel("num")
.reset_index()
)
assert_frame_equal(result, actual)
def test_names_pattern_single_column(single_val):
"""
Test output if names_to is only '.value'.
"""
result = single_val.pivot_longer(
"id", names_to=".value", names_pattern="(.)."
)
actual = (
        pd.wide_to_long(single_val, ["x"], i="id", j="num")
#!/usr/bin/env python3
import pandas as pd
from pykakasi import kakasi
kakasi=kakasi()
kakasi.setMode('H', 'a')
kakasi.setMode('K', 'a')
kakasi.setMode('J', 'a')
conv = kakasi.getConverter()
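# Added note (not in the original script): with the H/K/J -> 'a' modes set above,
# the legacy pykakasi API romanises Japanese text, e.g. conv.do('東京') is expected
# to return roughly 'toukyou'; the converter is presumably applied to station names
# further down in the (truncated) script.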
pd.set_option('display.max_rows',1000)
listdf = pd.read_csv('crawl.txt', comment='#')  # load the list of stations to crawl
obsdf = pd.read_csv('obs.txt')  # load the full list of observation stations
list0 = pd.merge(listdf, obsdf, on='観測所番号', how='left')
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_without_partition_range(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(["a", "c"], dtype="object"),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series([None, None], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_selection(mssql_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": | pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64") | pandas.Series |
# Some utilites functions for loading the data, adding features
import numpy as np
import pandas as pd
from functools import reduce
from sklearn.preprocessing import MinMaxScaler
def load_csv(path):
"""Load dataframe from a csv file
Args:
path (STR): File path
"""
# Load the file
df = pd.read_csv(path)
# Lowercase column names
df.rename(columns=lambda x: x.lower().strip(), inplace=True)
return df
def add_time_features(df):
"""Add time features for the data
Args:
df (DataFrame): Input dataframe
Return: the modified df
"""
df['ds'] = pd.to_datetime(df['update_time']) + df['hour_id'].astype('timedelta64[h]')
df['dow'] = df['ds'].dt.dayofweek
df['month'] = df['ds'].dt.month
df['doy'] = df['ds'].dt.dayofyear
df['year'] = df['ds'].dt.year
df['day'] = df['ds'].dt.day
df['week'] = df['ds'].dt.week
# Normalise day of week col
week_period = 7 / (2 * np.pi)
df['dow_norm'] = df.dow.values / week_period
return df
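# Added note: dow_norm above maps the day of week onto [0, 2*pi), e.g. dow=0 -> 0.0
# and dow=3 -> 3 * 2 * pi / 7, about 2.69 rad, which is the usual first step before
# a cyclical sin/cos encoding (the sin/cos step itself is not applied here).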
def add_special_days_features(df):
"""Add special events and holidays features
Args:
df (DataFrame): Input dataframe
Return: the modified df
"""
# Days when there were sudden decrease/increase in bandwidth/max users
range1 = pd.date_range('2018-02-10', '2018-02-27')
range2 = pd.date_range('2019-01-30', '2019-02-12')
abnormals = range1.union(range2)
# For zone 1 only
# range3 = pd.date_range('2017-12-23', '2017-12-25')
# Init 2 new columns
df['abnormal_bw'], df['abnormal_u'] = 0,0
# Set the abnormal weights for each zone (negative if decrease, positive if increase)
# For total bandwidth
df.loc[df['zone_code'].isin(['ZONE01']) ,'abnormal_bw'] = df[df['zone_code'].isin(['ZONE01'])].update_time.apply(lambda date: -1 if pd.to_datetime(date) in abnormals else 0)
df.loc[df['zone_code'].isin(['ZONE02']) ,'abnormal_bw'] = df[df['zone_code'].isin(['ZONE02'])].update_time.apply(lambda date: 0.8 if pd.to_datetime(date) in abnormals else 0)
df.loc[df['zone_code'].isin(['ZONE03']) ,'abnormal_bw'] = df[df['zone_code'].isin(['ZONE03'])].update_time.apply(lambda date: 0.2 if pd.to_datetime(date) in abnormals else 0)
# For max users
df.loc[df['zone_code'].isin(['ZONE01']) ,'abnormal_u'] = df[df['zone_code'].isin(['ZONE01'])].update_time.apply(lambda date: -1 if pd.to_datetime(date) in abnormals else 0)
df.loc[df['zone_code'].isin(['ZONE02']) ,'abnormal_u'] = df[df['zone_code'].isin(['ZONE02'])].update_time.apply(lambda date: 0.8 if pd.to_datetime(date) in abnormals else 0)
df.loc[df['zone_code'].isin(['ZONE03']) ,'abnormal_u'] = df[df['zone_code'].isin(['ZONE03'])].update_time.apply(lambda date: 0.6 if pd.to_datetime(date) in abnormals else 0)
# Holidays
holidays = pd.to_datetime(['2018-01-01', '2017-12-23', '2017-12-24', '2017-12-25',
'2018-02-14', '2018-02-15', '2018-02-16', '2018-02-17', '2018-02-18', '2018-02-19', '2018-02-20',
'2018-03-27', '2018-04-30', '2018-05-01', '2018-09-02', '2018-09-03', '2018-12-31',
'2019-01-01', '2019-02-04', '2019-02-05', '2019-02-06', '2019-02-07', '2019-02-08',
'2019-04-15',
'2019-04-29', '2019-04-30', '2019-05-01', '2019-09-02',
])
df['holiday'] = df.update_time.apply(lambda date: 1 if pd.to_datetime(date) in holidays else 0)
return df
def zone_features(df, zfeatures, aufeatures):
"""Create zone features from the data
Args:
df (DataFrame): Input dataframe
zfeatures (list): List of zone median features
aufeatures (list): List of zone autocorr features
Return: 2 dataframes
"""
# Medians from the last 1,3,6,12 months
zones_1y = df[(df['ds'] >= '2018-03-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_1y.columns = ['zone_code','median_user_1y','median_bw_1y']
zones_1m = df[(df['ds'] >= '2019-02-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_1m.columns = ['zone_code','median_user_1m','median_bw_1m']
zones_3m = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_3m.columns = ['zone_code','median_user_3m','median_bw_3m']
zones_6m = df[(df['ds'] >= '2018-09-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_6m.columns = ['zone_code','median_user_6m','median_bw_6m']
# Autocorrelation features
zones_autocorr = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': {
'lag_user_1d' :lambda x: pd.Series.autocorr(x, 24),
'lag_user_3d' :lambda x: pd.Series.autocorr(x, 3*24),
            'lag_user_1w' :lambda x: pd.Series.autocorr(x, 24*7),
#!/usr/bin/env python
"""
Author: <NAME>
Mail: <EMAIL>
Last updated: 24/04/2020
Takes a dataset and an optional tolerance as arguments
Produces a report file containing the confusion matrix, ACC, MCC and selected
threshold for 10 randomised cross-validation runs on an 80/20 split of the
dataset.
It also produces a ROC curve plot on the entire dataset and a plot of the MCC vs
threshold used for all the training sets.
It prints a log of the operation to STDOUT.
"""
import sys
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn import utils
from sklearn import model_selection
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def get_df(path):
print("Inizialising dataset:", path)
col_names = ["ID", "Feature", "Class"]
df = pd.read_csv(path, sep="\s+", names=col_names)
return df
def get_train_test(df, set_indeces_iterator, numsplits=5):
set_indeces = next(set_indeces_iterator)
df_train = df.iloc[set_indeces[0]]
df_test = df.iloc[set_indeces[1]]
return df_train, df_test
def get_stats(df, thr):
y_true = df["Class"].values
y_pred = [1 if (value < thr) else 0 for value in df["Feature"].values]
confusion_mat = metrics.confusion_matrix(y_true, y_pred, labels=[1, 0])
ACC = metrics.accuracy_score(y_true, y_pred)
MCC = metrics.matthews_corrcoef(y_true, y_pred)
return confusion_mat, ACC, MCC
def get_wrong_predictions(df, thr):
false_positives = df.loc[(df["Class"] == 0) & (df["Feature"] < thr)]
false_negatives = df.loc[(df["Class"] == 1) & (df["Feature"] > thr)]
wrong_pred_report = false_positives, false_negatives
return wrong_pred_report
def thr_explore(df, start_thr=-100, stop_thr=1, step_thr=1):
MCC_list, exp_list, ACC_list = [], [], []
print("Threshold\tACC\tMCC")
for exp in range(start_thr, stop_thr, step_thr):
thr = 10 ** exp
y_true = df["Class"].values
y_pred = [1 if (value < thr) else 0 for value in df["Feature"].values]
MCC = metrics.matthews_corrcoef(y_true, y_pred)
ACC = metrics.accuracy_score(y_true, y_pred)
MCC_list.append(MCC)
ACC_list.append(ACC)
exp_list.append(exp)
print(thr, ACC, MCC)
return MCC_list, exp_list, ACC_list
def get_best_thr(df, start_thr=-100, stop_thr=1, step_thr=1):
MCC_list, exp_list, ACC_list = thr_explore(df, start_thr, stop_thr, step_thr)
thr_list = [10 ** exp for exp in exp_list]
best_MCC = -2
for i in range(len(MCC_list)):
if MCC_list[i] > best_MCC:
best_exp_list = [exp_list[i]]
best_MCC = MCC_list[i]
elif MCC_list[i] == best_MCC:
best_exp_list.append(exp_list[i])
best_exp = np.mean(best_exp_list)
best_thr = 10 ** best_exp
print("Selected threshold:", best_thr)
return best_thr, thr_list, MCC_list, ACC_list
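# Worked example (added for clarity): if the best MCC is reached at both 1e-5 and
# 1e-3, best_exp = mean([-5, -3]) = -4, so the selected threshold is 1e-4, i.e. the
# geometric mean of the tied thresholds.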
def get_roc_auc(df):
y_true = df["Class"].values # ROC curve calculation
y_score = [-value for value in df["Feature"].values]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
auc = metrics.auc(fpr, tpr)
return fpr, tpr, auc
def train_and_test(df, numsplits, rand_seed, start_thr=-100, stop_thr=1, step_thr=1):
(train_report, all_MCCs, all_ACCs, test_auc_list, test_tpr_list, test_fpr_list,) = (
[],
[],
[],
[],
[],
[],
)
false_positives, false_negatives = pd.DataFrame(), pd.DataFrame()
print("\nSplitting the dataset in", numsplits, "Kfolds")
set_indeces_iterator = model_selection.StratifiedKFold( # creates an iterator that at each iteration
n_splits=numsplits, # returns 2 arrays of indeces for splitting the fold
shuffle=True,
random_state=rand_seed,
).split(
df, y=df["Class"] # the y makes it respect the stratification of the classes
)
for j in range(numsplits):
# training
print("\nTraining on fold", j + 1)
df_train, df_test = get_train_test(df, set_indeces_iterator, numsplits)
thr, thr_list, MCC_list, ACC_list = get_best_thr(
df_train, start_thr, stop_thr, step_thr
)
stats_train = get_stats(df_train, thr)
stats_test = get_stats(df_test, thr)
# testing
train_roc_auc = get_roc_auc(df_train)
test_roc_auc = get_roc_auc(df_test)
test_fpr_list += list(test_roc_auc[0]) # for roc plot
test_tpr_list += list(test_roc_auc[1]) # for roc plot
test_auc_list.append(test_roc_auc[2]) # for roc plot
train_report.append(
(stats_test[0], stats_test[1], stats_test[2], thr, test_roc_auc[2])
)
wrong_pred_report = get_wrong_predictions(df_test, thr)
false_positives = pd.concat([false_positives, wrong_pred_report[0]])
false_negatives = pd.concat([false_negatives, wrong_pred_report[1]])
all_MCCs += MCC_list # for MCC-E value plot
all_ACCs += ACC_list # for ACC-E value plot
print("\nPerformance on fold", j + 1)
print(" Training set\tTest set")
print("AUC", train_roc_auc[2], test_roc_auc[2])
print("ACC", str(stats_train[1]), str(stats_test[1]))
print("MCC", str(stats_train[2]), str(stats_test[2]))
print("CM train\n", stats_train[0])
print("CM test\n", stats_test[0])
print("\nFalse positives in the test set")
print(wrong_pred_report[0].to_string(index=False))
print("\nFalse negatives in the test set")
print(wrong_pred_report[1].to_string(index=False))
wrong_pred_report_final = false_positives, false_negatives
thr_MCC_report = (thr_list * numsplits, all_MCCs, all_ACCs)
roc_curve_report = (test_auc_list, test_tpr_list, test_fpr_list)
return train_report, thr_MCC_report, roc_curve_report, wrong_pred_report_final
def get_final_stats(train_report):
final_stats = {}
list_AUC = [train[4] for train in train_report]
arr_AUC = np.array(list_AUC)
list_ACC = [train[1] for train in train_report]
arr_ACC = np.array(list_ACC)
list_MCC = [train[2] for train in train_report]
arr_MCC = np.array(list_MCC)
arr_evalue = [train[3] for train in train_report]
arr_exp = np.log10(arr_evalue)
avg_exp = np.mean(arr_exp)
list_cm = [np.array(train[0]) for train in train_report]
tp_tot = sum([cm[0][0] for cm in list_cm])
fp_tot = sum([cm[0][1] for cm in list_cm])
fn_tot = sum([cm[1][0] for cm in list_cm])
tn_tot = sum([cm[1][1] for cm in list_cm])
final_stats["avg_AUC"] = np.mean(arr_AUC)
final_stats["std_AUC"] = np.std(arr_AUC)
final_stats["avg_ACC"] = np.mean(arr_ACC)
final_stats["std_ACC"] = np.std(arr_ACC)
final_stats["avg_MCC"] = np.mean(arr_MCC)
final_stats["std_MCC"] = np.std(arr_MCC)
final_stats["avg_evalue"] = 10 ** avg_exp
final_stats["final_cm"] = (tp_tot, fp_tot, fn_tot, tn_tot)
return final_stats
def write_report(final_report_list):
with open("cross_val_report.dat", "w") as f:
for final_report in final_report_list:
f.write(final_report)
def get_final_report(train_report, wrong_pred_report, argv_index):
final_report = ""
sep = "\t"
final_report += "# Input file: " + str(sys.argv[argv_index]) + "\n"
final_report += (
"tp"
+ sep
+ "fp"
+ sep
+ "fn"
+ sep
+ "tn"
+ sep
+ "ACC"
+ sep
+ "MCC"
+ sep
+ "thr"
+ "\n"
)
for train in train_report:
cm, ACC, MCC, thr = train[0], train[1], train[2], train[3]
final_report += (
str(cm[0][0])
+ sep
+ str(cm[0][1])
+ sep
+ str(cm[1][0])
+ sep
+ str(cm[1][1])
+ sep
+ str(ACC)
+ sep
+ str(MCC)
+ sep
+ str(thr)
+ "\n"
)
final_stats = get_final_stats(train_report)
final_report += (
"---"
+ "\nAverage AUC: "
+ str(final_stats["avg_AUC"])
+ "\nStandard deviation AUC: "
+ str(final_stats["std_AUC"])
+ "\nAverage ACC: "
+ str(final_stats["avg_ACC"])
+ "\nStandard deviation ACC: "
+ str(final_stats["std_ACC"])
+ "\nAverage MCC: "
+ str(final_stats["avg_MCC"])
+ "\nStandard deviation MCC: "
+ str(final_stats["std_MCC"])
+ "\nAverage E value threshold: "
+ str(final_stats["avg_evalue"])
+ "\nFinal confusion matrix (elementwise sum of test confusion matrices): "
+ str(final_stats["final_cm"])
)
final_report += (
"\n\nFalse positives from all the test sets\n"
+ wrong_pred_report[0].drop_duplicates().to_string(index=False)
+ "\n\nFalse negatives from all the test sets\n"
+ wrong_pred_report[1].drop_duplicates().to_string(index=False)
)
if argv_index <= (len(sys.argv) - 1):
final_report += "\n\n"
else:
final_report += "\n"
return final_report
def plot_roc_curve(roc_curve_report_list):
for i in range(len(sys.argv) - 1):
auc_list, tpr_list, fpr_list = roc_curve_report_list[i]
is_beginning = True
tpr_list = tpr_list[-len(fpr_list) :]
tpr_list.insert(0, 0.0)
fpr_list.insert(0, 0.0)
random_tpr_list = [val for val in fpr_list]
sns.lineplot(
x=fpr_list,
y=tpr_list,
markers=True,
color=sns.color_palette()[i],
estimator=None,
)
plt.plot(fpr_list, random_tpr_list, color="r", ls="dashed")
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.legend(labels=[sys.argv[i + 1] + " E Value ROC", "Random Classifier"])
plt.savefig("roc_plot" + sys.argv[i + 1] + ".png")
plt.clf()
def plot_thr_ACC(thr_MCC_report_list, final_stats_list):
best_thr, df_list, label_list = [], [], []
for i in range(len(sys.argv) - 1):
df = pd.DataFrame(thr_MCC_report_list[i]).T
df.columns = ["E value", "MCC", "ACC"]
df["Filename"] = sys.argv[i + 1]
df_list.append(df)
best_thr.append(final_stats_list[i]["avg_evalue"])
label_list.append(sys.argv[i + 1])
    df_all = pd.concat(df_list)
"""
Original data: 公司股市代號對照表.csv (company name / stock ticker mapping table)
Conditions:
1. Single-month revenue ranks #1 among all recorded months (a new record high)
   from 月營收創新高.xlsx
2. Debt ratio < 40%
   (quarterly figures)
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E8%B2%A0%E5%82%B5%E7%B8%BD%E9%A1%8D%E4%BD%94%E7%B8%BD%E8%B3%87%E7%94%A2%E6%AF%94%E6%9C%80%E9%AB%98%40%40%E8%B2%A0%E5%82%B5%E7%B8%BD%E9%A1%8D%40%40%E8%B2%A0%E5%82%B5%E7%B8%BD%E9%A1%8D%E4%BD%94%E7%B8%BD%E8%B3%87%E7%94%A2%E6%AF%94%E6%9C%80%E9%AB%98
3. Total director/supervisor shareholding + domestic institutional shareholding > 30%
   Directors/supervisors:
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E5%85%A8%E9%AB%94%E8%91%A3%E7%9B%A3%E6%8C%81%E8%82%A1%E6%AF%94%E4%BE%8B%28%25%29%40%40%E5%85%A8%E9%AB%94%E8%91%A3%E7%9B%A3%40%40%E6%8C%81%E8%82%A1%E6%AF%94%E4%BE%8B%28%25%29
   Domestic institutional investors:
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E6%9C%AC%E5%9C%8B%E6%B3%95%E4%BA%BA%E6%8C%81%E8%82%A1%E6%AF%94%E4%BE%8B%28%25%29%40%40%E6%9C%AC%E5%9C%8B%E6%B3%95%E4%BA%BA%40%40%E6%8C%81%E8%82%A1%E6%AF%94%E4%BE%8B%28%25%29
4. Director/supervisor share-pledge ratio < 10%
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E5%85%A8%E9%AB%94%E8%91%A3%E7%9B%A3%E8%B3%AA%E6%8A%BC%E6%AF%94%E4%BE%8B%28%25%29%40%40%E5%85%A8%E9%AB%94%E8%91%A3%E7%9B%A3%40%40%E8%B3%AA%E6%8A%BC%E6%AF%94%E4%BE%8B%28%25%29
   (same data as the director/supervisor shareholding list, only sorted differently)
5. Gross margin, operating margin and after-tax net margin all rising ("three margins rising")
   Gross margin ranks #1 among all recorded quarters (best on record)
   Operating margin ranks #1 among all recorded quarters
   Net margin ranks #1 among all recorded quarters
6. Dividend yield > 1%
   Cash dividend yield:
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E7%8F%BE%E9%87%91%E6%AE%96%E5%88%A9%E7%8E%87+%28%E6%9C%80%E6%96%B0%E5%B9%B4%E5%BA%A6%29%40%40%E7%8F%BE%E9%87%91%E6%AE%96%E5%88%A9%E7%8E%87%40%40%E6%9C%80%E6%96%B0%E5%B9%B4%E5%BA%A6
7. Bullish alignment of moving averages
   Moving averages trending upward
8. Cash flow ratio > 0 or operating cash flow > 0 (skip)
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E6%99%BA%E6%85%A7%E9%81%B8%E8%82%A1&INDUSTRY_CAT=%E6%9C%88K%E7%B7%9A%E7%AA%81%E7%A0%B4%E5%AD%A3%E7%B7%9A%40%40%E6%9C%88K%E7%B7%9A%E5%90%91%E4%B8%8A%E7%AA%81%E7%A0%B4%E5%9D%87%E5%83%B9%E7%B7%9A%40%40%E5%AD%A3%E7%B7%9A
9. Stock has not yet gone through a big rally or crash (skip)
"""
import sys
import pdb
import time
import pandas as pd
import random
from datetime import datetime
import global_vars
from stock_web_crawler import stock_crawler, delete_header, excel_formatting
from stock_info import stock_ID_name_mapping
# global variables
DEBT_RATIO = 40  # debt ratio threshold (%)
STAKEHOLDING = 30  # combined shareholding threshold (%)
PLEDGE_RATIO = 10  # share-pledge ratio threshold (%)
GROSS_MARGIN = 20  # gross margin (%)
OPERATING_MARGIN = 20  # operating margin (%)
NET_PROFIT_MARGIN = 20  # after-tax net profit margin (%)
DIVIDEND_YIELD = 1  # cash dividend yield threshold (%)
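# Illustrative sketch (added; not part of the original script): if the crawled
# goodinfo tables were merged into one DataFrame, the thresholds above would
# combine into a single boolean mask roughly like this. The English column names
# below are assumptions for illustration only; the real data uses Chinese headers.
def passes_fundamental_filters(df):
    return (
        (df["debt_ratio"] < DEBT_RATIO)
        & ((df["director_holding"] + df["institutional_holding"]) > STAKEHOLDING)
        & (df["pledge_ratio"] < PLEDGE_RATIO)
        & (df["cash_dividend_yield"] > DIVIDEND_YIELD)
    )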
def main():
file_path = global_vars.DIR_PATH + "公司股市代號對照表.csv"
stock_ID = list()
stock_name = list()
with open(file_path, 'r', encoding="UTF-8") as file_r:
file_r.readline() # skip the first row
for line in file_r:
line = line.split(",")
stock_ID.append(line[0])
stock_name.append(line[1])
df_combine = pd.DataFrame(list(zip(stock_ID, stock_name)), columns=["代號", "名稱"])
file_path = global_vars.DIR_PATH + "月營收創新高.xlsx"
try:
last_month = datetime.now().month-1
if last_month <= 0:
last_month = 12
        df = pd.read_excel(file_path, sheet_name=f"{last_month}月")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from src.metrics_helpers import score_predictions
def fit_predict(X, y, index):
"""Train and make in-sample prediction."""
clf = LinearRegression().fit(X, y)
daily = clf.predict(X)
daily_predicted = pd.Series(daily, index=index, name="predicted")
return [daily_predicted, clf]
def score_model(
features,
df_ca_fold,
categoricals,
numericals,
district,
ca_name,
y,
):
"""Add features, standardize, encode and score in-sample prediction."""
X_fold, y_fold = df_ca_fold[features], df_ca_fold["num_trips"]
# OHE categoricals
df_cats = pd.get_dummies(
X_fold[categoricals],
columns=categoricals,
drop_first=False,
prefix=categoricals,
prefix_sep="=",
)
df_cats.columns = df_cats.columns.str.replace("month=", "").str.replace(
"weekday=", ""
)
df_cats = df_cats.drop(["is_holiday=0", "is_fireworks=0"], axis=1)
# Standardize numericals
ss = StandardScaler()
ss.fit(X_fold[numericals])
X_nums_tr = ss.transform(X_fold[numericals])
X_nums_tr = pd.DataFrame(X_nums_tr, columns=numericals, index=X_fold.index)
# Combine processed numericals and categoricals
X_fold = (
df_cats.merge(
X_nums_tr, left_index=True, right_index=True, how="left"
).merge(
X_fold[["daycount"]],
left_index=True,
right_index=True,
how="left",
)
).reset_index(drop=True)
# Fit model and generate (in-sample) predictions
daily_predicted_fold, clf_fitted = fit_predict(
X_fold, y_fold, df_ca_fold["startdate"]
)
# Get model coefficients
coefs = clf_fitted.coef_
df_coefs_fold = (
(
pd.Series(coefs, index=list(X_fold), name="coef")
.reset_index()
.rename(columns={"index": "feature"})
)
.assign(year=y)
.assign(NAME_start=district)
)
# Remove standardization from model coefficients for numericals
ss_scale_array = pd.Series(ss.scale_).to_numpy()
nums_mask = df_coefs_fold["feature"].isin(numericals)
coefs = df_coefs_fold.loc[nums_mask, "coef"]
df_coefs_fold.loc[nums_mask, "coef"] = coefs.to_numpy() / ss_scale_array
# Gather results
model_eval_dict = {
"community": ca_name,
"year": y,
"NAME": district,
}
model_eval_dict.update(
score_predictions(
df_ca_fold.set_index("startdate")["num_trips"].to_numpy(),
daily_predicted_fold.to_numpy(),
get_r2=True,
)
)
return [df_coefs_fold, model_eval_dict]
def cross_validate(
data, cv_fold_years, categoricals, numericals, features, years_to_use
):
"""For each CA, preprocess data, train and score in-sample prediction."""
dfs_coefs_all = []
dfs_model_eval = []
for ca_name in data["community"].unique():
df_ca = data[
(data["community"] == ca_name)
& (data["startdate"].dt.year.isin(years_to_use))
].copy()
d = {}
d_model_eval = []
for y in cv_fold_years:
outputs = score_model(
features,
df_ca[df_ca["startdate"].dt.year <= y].assign(
daycount=np.arange(
len(df_ca[df_ca["startdate"].dt.year <= y])
)
),
categoricals,
numericals,
df_ca["NAME"].iloc[0],
ca_name,
y,
)
d[str(y)] = outputs[0]
d_model_eval.append(outputs[1])
# Combine model coefficients per CV fold
df_coefs_cv = pd.concat(list(d.values()), ignore_index=True).assign(
community_start=ca_name
)
# Combine model evaluation per CV fold
df_model_eval = pd.DataFrame.from_records(d_model_eval)
dfs_coefs_all.append(df_coefs_cv)
dfs_model_eval.append(df_model_eval)
df_coefs_all_folds = pd.concat(dfs_coefs_all, ignore_index=True)
    df_model_eval_all_folds = pd.concat(dfs_model_eval, ignore_index=True)
import pandas as pd
import json
def get_dict_index():
""" Read the Title column of the excel file and assign it
to two index for each sheet and return the indices
"""
file1 = "data/dictionary/POS Dictionary.xlsx"
df1 = pd.read_excel(file1,
"Sheet2") # Read sheet2 first since that one contains all the word definitions
df2 = pd.read_excel(file1,
"Sheet1")
df1.set_index('Title')
df2.set_index('Title')
df1.dropna()
df2.dropna()
index1 = df1['Title'].values[:]
index2 = df2['Title'].values[:]
index1 = index1[~pd.isnull(index1)]
    index2 = index2[~pd.isnull(index2)]
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("sort", [True, False])
def test_factorize(index_or_series_obj, sort):
obj = index_or_series_obj
result_codes, result_uniques = obj.factorize(sort=sort)
constructor = pd.Index
if isinstance(obj, pd.MultiIndex):
constructor = pd.MultiIndex.from_tuples
expected_uniques = constructor(obj.unique())
if sort:
expected_uniques = expected_uniques.sort_values()
# construct an integer ndarray so that
# `expected_uniques.take(expected_codes)` is equal to `obj`
expected_uniques_list = list(expected_uniques)
expected_codes = [expected_uniques_list.index(val) for val in obj]
expected_codes = np.asarray(expected_codes, dtype=np.intp)
tm.assert_numpy_array_equal(result_codes, expected_codes)
tm.assert_index_equal(result_uniques, expected_uniques)
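# Added illustration: for obj = pd.Series(["b", "a", "b"]) and sort=False, factorize
# returns codes array([0, 1, 0]) with uniques Index(["b", "a"]); with sort=True the
# uniques become Index(["a", "b"]) and the codes array([1, 0, 1]), so in both cases
# uniques.take(codes) reproduces the original values.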
def test_series_factorize_na_sentinel_none():
# GH35667
values = np.array([1, 2, 1, np.nan])
ser = pd.Series(values)
codes, uniques = ser.factorize(na_sentinel=None)
expected_codes = np.array([0, 1, 0, 2], dtype=np.intp)
    expected_uniques = pd.Index([1.0, 2.0, np.nan])