| prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
|---|---|---|
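Each row pairs a truncated Python source file (`prompt`) with the pandas expression that completes it (`completion`) and the fully qualified API being called (`api`); in the rows reproduced below, the prompt ends mid-assignment and the completion and api values follow, separated by pipes. As a minimal sketch of how rows with this schema might be consumed (the dataset identifier below is a hypothetical placeholder, not the real one):

```python
# Minimal sketch, assuming a Hugging Face-style dataset with string columns
# "prompt", "completion", and "api"; the dataset path is hypothetical.
from datasets import load_dataset

ds = load_dataset("org/pandas-api-completions", split="train")  # hypothetical identifier
row = ds[0]
print(row["api"])            # e.g. "pandas.Series"
print(row["completion"])     # the pandas call that completes the prompt
print(row["prompt"][-120:])  # tail of the truncated source preceding the call
```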
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame(
{'Value': [0.1, 0.5, 2]},
index=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_quantile_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted quantile(X, 0.5)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame(
{
'Y-weighted quantile(X, 0.25)': [1.25, 0.5],
'Y-weighted quantile(X, 0.5)': [2., 1.25],
'sum(X)': [6, 3]
},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level=['Metric', 'grp'], inplace=True) # For Py2
expected = pd.DataFrame({'Value': [1.25, 0.5, 2., 1.25, 6., 3.]},
index=pd.MultiIndex.from_product(
([
'Y-weighted quantile(X, 0.25)',
'Y-weighted quantile(X, 0.5)', 'sum(X)'
], ['A', 'B']),
names=['Metric', 'grp']))
testing.assert_frame_equal(output, expected)
def test_variance_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var())
def test_variance_biased(self):
metric = metrics.Variance('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var(ddof=0))
def test_variance_split_by_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].var()
expected.name = 'var(X)'
testing.assert_series_equal(output, expected)
def test_variance_where(self):
metric = metrics.Variance('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].var()
self.assertEqual(output, expected)
def test_variance_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'var(X)': [self.df.X.var()]})
testing.assert_frame_equal(output, expected)
def test_variance_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.var()]}, index=['var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'var(X)': self.df.groupby('grp')['X'].var()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].var().values,
'grp': ['A', 'B']
},
index=['var(X)', 'var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_variance_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_variance_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 0.75)
def test_weighted_variance_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((2., 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted var(X)'
testing.assert_series_equal(output, expected)
def test_weighted_variance_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted var(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_variance_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted var(X)': [2., 1]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [2., 1],
'grp': ['A', 'B']
},
index=['Y-weighted var(X)', 'Y-weighted var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_standard_deviation_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std())
def test_standard_deviation_biased(self):
metric = metrics.StandardDeviation('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0))
def test_standard_deviation_split_by_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std()
expected.name = 'sd(X)'
testing.assert_series_equal(output, expected)
def test_standard_deviation_where(self):
metric = metrics.StandardDeviation('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std()
self.assertEqual(output, expected)
def test_standard_deviation_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sd(X)': [self.df.X.std()]})
testing.assert_frame_equal(output, expected)
def test_standard_deviation_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.std()]}, index=['sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sd(X)': self.df.groupby('grp')['X'].std()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].std().values,
'grp': ['A', 'B']
},
index=['sd(X)', 'sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_standard_deviation_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, np.sqrt(0.75))
def test_weighted_standard_deviation_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((np.sqrt(2), 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted sd(X)'
testing.assert_series_equal(output, expected)
def test_weighted_standard_deviation_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted sd(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted sd(X)': [np.sqrt(2), 1]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [np.sqrt(2), 1],
'grp': ['A', 'B']
},
index=['Y-weighted sd(X)', 'Y-weighted sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_cv_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.sqrt(1 / 3.))
def test_cv_biased(self):
metric = metrics.CV('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0) / np.mean(self.df.X))
def test_cv_split_by_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std() / [1, 2.75]
expected.name = 'cv(X)'
testing.assert_series_equal(output, expected)
def test_cv_where(self):
metric = metrics.CV('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std() / 2.75
self.assertEqual(output, expected)
def test_cv_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'cv(X)': [np.sqrt(1 / 3.)]})
testing.assert_frame_equal(output, expected)
def test_cv_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [np.sqrt(1 / 3.)]}, index=['cv(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'cv(X)': [0, np.sqrt(1 / 8.25)]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
data={
'Value': [0, np.sqrt(1 / 8.25)],
'grp': ['A', 'B']
},
index=['cv(X)', 'cv(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_correlation(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.corrcoef(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.corr(self.df.Y))
def test_weighted_correlation(self):
metric = metrics.Correlation('X', 'Y', weight='Y')
output = metric.compute_on(self.df)
cov = np.cov(self.df.X, self.df.Y, aweights=self.df.Y)
expected = pd.DataFrame(
{'Y-weighted corr(X, Y)': [cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])]})
testing.assert_frame_equal(output, expected)
def test_correlation_method(self):
metric = metrics.Correlation('X', 'Y', method='kendall')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.corr(self.df.Y, method='kendall'))
def test_correlation_kwargs(self):
metric = metrics.Correlation('X', 'Y', min_periods=10)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertTrue(pd.isnull(output))
def test_correlation_split_by_not_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
corr_a = metric.compute_on(
self.df[self.df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(
self.df[self.df.grp == 'B'], return_dataframe=False)
expected = | pd.Series([corr_a, corr_b], index=['A', 'B']) | pandas.Series |
"""Scrapes motions tabs. Not Motion statistics tabs."""
import pandas as pd
import os.path
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from typing import List, Dict, Tuple
from pathlib import Path
from urllib.parse import urljoin
def save_all_motions(tournament_name: str, base_url: str, driver: webdriver.Edge) -> None:
"""Saves in a csv file all motions from the given tournament"""
filepath = Path('scraped_data/' + tournament_name + ' - Motions.csv')
filepath.parent.mkdir(parents=True, exist_ok=True)
if os.path.exists(filepath):
return
df = scrape_motions(base_url, driver)
if df.empty:
return
df.to_csv(filepath) # save as csv
def scrape_motions(base_url: str, driver: webdriver.Edge) -> pd.DataFrame:
"""Given a filepath containing tournament name and basic URL, extracts each round's name,
motion and infoslide."""
valid_url = get_valid_motions_url(base_url, driver)
if valid_url == '':
return | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import pickle
import numpy as np
from sklearn import preprocessing, impute
import directories
RND_SEED = 1729
def preprocess_training_inputs():
with open(os.path.join(directories.model_dir,"training_features.p"),"rb") as file:
training_inputs = pickle.load(file)
# ground truth
training_outputs = training_inputs.pop("LABEL")
# encode F/M as 1/0
enc = preprocessing.OrdinalEncoder().fit(pd.DataFrame(training_inputs.GENDER))
training_inputs.GENDER = enc.transform(pd.DataFrame(training_inputs.GENDER))
# extract epoch (number of 6-hr windows away from pressor event)
# and patient-tag indicating pressor nor not (observed label)
# and subject ID
training_times = training_inputs.pop("EPOCH")
pressor_at_all = training_inputs.pop("EPISODE")
pressor_at_all = pressor_at_all==1
training_groups = training_inputs.pop("SUBJECT_ID").values
hadm_id = training_inputs.pop("HADM_ID")
scaler = preprocessing.StandardScaler().fit(training_inputs)
with open(os.path.join(directories.model_dir,"scaler_encoder.p"), "wb") as file:
pickle.dump({"scaler": scaler,"encoder": enc}, file)
X = training_inputs
X = | pd.DataFrame(X,columns=training_inputs.columns) | pandas.DataFrame |
import pandas as pd
import microdf as mdf
V1 = [1, 2, 3]
V2 = [4, 5, 6]
W1 = [7, 8, 9]
W2 = [10, 11, 12]
DF1 = pd.DataFrame({"v": V1, "w": W1})
DF2 = | pd.DataFrame({"v": V2, "w": W2}) | pandas.DataFrame |
# This file is taken from:
# https://github.com/polaris-slo-cloud/polaris-ai/blob/main/predictive_monitoring/lstm_approach/gcd_data_manipulation.py
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.preprocessing import MinMaxScaler
def load_data(input_path, selected_columns):
# Load data
readings_df = read_csv(input_path, index_col=0)
df = readings_df[selected_columns]
return df
def data_aggregation(df, aggr_type="mean"):
"""
:param aggr_type: aggregation type
:type df: DataFrame
"""
if aggr_type == 'mean':
df = df.groupby('end time').mean()
elif aggr_type == 'q95':
df = df.groupby('end time').quantile(0.95)
elif aggr_type == 'max':
df = df.groupby('end time').max()
return df
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
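# Frames a (multivariate) time series as a supervised-learning table: columns
# var*(t-n_in) ... var*(t-1) hold lagged inputs and var*(t) ... var*(t+n_out-1)
# hold the forecast targets; rows left with NaNs by the shifting can be dropped.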
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
agg = | concat(cols, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""Classifier Examples: some use case examples for building and assessing classifiers.
This will become a note book once complete.
"""
__author__ = ["TonyBagnall"]
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sktime.classification.dictionary_based import BOSSEnsemble, ContractableBOSS
from sktime.datasets import load_unit_test
def build_classifiers():
"""Examples of building a classifier.
1. Directly from 2D numpy arrays.
2. Directly from 3D numpy arrays.
3. From a nested pandas.
4. From a baked in dataset.
5. From any UCR/UEA dataset downloaded from timeseriesclassification.com.
"""
# Create an array
# Random forest, rocket and HC2.
randf = RandomForestClassifier()
trainX, train_y, testX, test_y = make_toy_2d_problem()
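# sktime's 3D numpy layout is (n_instances, n_channels, n_timepoints); the reshape
# below lifts the 2D (n_instances, n_timepoints) toy data into that layout with a
# single channel.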
X = trainX.reshape(trainX.shape[0], 1, trainX.shape[1])
train_y = | pd.Series(train_y) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 14:22:56 2021
@author: KRS1BBH
"""
from ImportFilter import Importfile
import pandas as pd
import os, glob
#get path of directory script is executed from
dirname = os.path.dirname(__file__)
#nuk
Filelist=[dirname+'/testdata/NuK/LotResultSummaryAll.csv']
product='test'
recipe='test'
equipment='NuK'
data_object_nuk= | pd.DataFrame() | pandas.DataFrame |
"""
Enrich2 experiment experiment module
====================================
This module contains the class used by ``Enrich2`` to represent an
experiment. This class coordinates experimental conditions, and typically
sits at the top level of the tree structure.
"""
import logging
import numpy as np
import pandas as pd
import scipy.stats as stats
from ..base.config_constants import SCORER, SCORER_OPTIONS, SCORER_PATH
from ..base.config_constants import CONDITIONS
from ..base.utils import compute_md5, log_message
from ..base.constants import WILD_TYPE_VARIANT
from ..base.storemanager import StoreManager
from ..statistics.random_effects import partitioned_rml_estimator
from ..statistics.random_effects import nan_filter_generator
from .condition import Condition
__all__ = ["Experiment"]
class Experiment(StoreManager):
"""
Class for a coordinating multiple
:py:class:`~enrich2.selection.selection.Selection` objects. Creating an
:py:class:`~enrich2.experiment.experiment.Experiment` requires a valid
*config* object, usually from a ``.json`` configuration file.
Attributes
----------
conditions : `list`
A list of :py:class:`~enrich2.experiment.condition.Condition` objects
_wt : :py:class:`~enrich2.sequence.wildtype.WildTypeSequence`
Methods
-------
wt
Property returning the wildtype sequence object.
configure
Configures the object from an dictionary loaded from a configuration
file.
serialize
Returns a `dict` with all configurable attributes stored that can
be used to reconfigure a new instance.
validate
Validates the attributes of this instance.
_children
Concrete method returning sorted conditions.
remove_child_id
Removes the child with the specified ``treeview_id``
add_child
Adds a child to this instance's children.
selection_list
Return a list of selections managed by this instance.
is_coding
Returns a boolean indicating if all children have coding sequences.
has_wt_sequence
Returns a boolean indicating if all children have a wt sequence.
calculate
Calculates combined scores with statistics from selections
and conditions.
combine_barcode_maps
Combine all barcode maps for selections into a single dataframe.
calc_counts
Create a data frame of all counts in this Experiment.
calc_shared_full
Create a data frame containing all scores across all selections.
calc_shared
Get the subset of scores that are shared across all Selections.
calc_scores
Combine the scores and standard errors within each condition.
calc_pvalues_wt
Calculate uncorrected pvalue for each variant compared to wild type.
calc_pvalues_pairwise
Calculate pvalues for each variant in each pair of Conditions.
write_tsv
Write each table from the store to its own tab-separated file.
See Also
--------
:py:class:`~enrich2.experiment.condition.Condition`
:py:class:`~enrich2.selection.selection.Selection`
"""
store_suffix = "exp"
treeview_class_name = "Experiment"
def __init__(self):
StoreManager.__init__(self)
self.conditions = list()
self._wt = None
@property
def wt(self):
"""
Property managing the wild type sequences of the
:py:class:`~enrich2.selection.selection.Selection` objects being
managed.
Returns
-------
:py:class:`~enrich2.sequence.wildtype.WildTypeSequence`
"""
if self.has_wt_sequence():
if self._wt is None:
self._wt = self.selection_list()[0].wt.duplicate(self.name)
return self._wt
else:
if self._wt is not None:
raise ValueError(
"Experiment should not contain wild type "
"sequence [{}]".format(self.name)
)
else:
return None
def configure(self, cfg, configure_children=True, init_from_gui=False):
"""
Set up the :py:class:`~enrich2.experiment.experiment.Experiment`
using the *cfg* object, usually from a ``.json`` configuration file.
Parameters
----------
cfg : `dict` or :py:class:`~enrich2.config.types.ExperimentConfiguration`
The object used to configure this instance.
configure_children : `bool`
Traverse children and configure each one.
init_from_gui : `bool`
Allow this instance to be configured from a GUI.
"""
from ..config.types import ExperimentConfiguration
if isinstance(cfg, dict):
cfg = ExperimentConfiguration(cfg, init_from_gui)
elif not isinstance(cfg, ExperimentConfiguration):
raise TypeError("`cfg` was neither a " "ExperimentConfiguration or dict.")
StoreManager.configure(self, cfg.store_cfg)
if configure_children:
if len(cfg.condition_cfgs) == 0:
raise KeyError(
"Missing required config value {} [{}]"
"".format("conditions", self.name)
)
for cnd_cfg in cfg.condition_cfgs:
cnd = Condition()
cnd.configure(cnd_cfg)
self.add_child(cnd)
def serialize(self):
"""
Format this object (and its children) as a config object suitable for
dumping to a config file.
Returns
-------
`dict`
Dictionary of configuration options.
"""
cfg = StoreManager.serialize(self)
cfg[CONDITIONS] = [child.serialize() for child in self.children]
if self.get_root().scorer_class is not None:
cfg[SCORER] = {
SCORER_PATH: self.get_root().scorer_path,
SCORER_OPTIONS: self.get_root().scorer_class_attrs,
SCORER_PATH + " md5": compute_md5(self.get_root().scorer_path),
}
return cfg
def _children(self):
"""
Method bound to the ``children`` property. Returns a list of all
:py:class:`~enrich2.experiment.condition.Condition` objects
belonging to this object, sorted by name.
Returns
-------
`list`
List of sorted conditions.
"""
return sorted(self.conditions, key=lambda x: x.name)
def add_child(self, child):
"""
Add a condition to children conditions.
"""
if child.name in self.child_names():
raise ValueError(
"Non-unique condition name '{}' [{}]" "".format(child.name, self.name)
)
child.parent = self
self.conditions.append(child)
def remove_child_id(self, tree_id):
"""
Remove the reference to a
:py:class:`~enrich2.experiment.condition.Condition`
with Treeview id *tree_id*.
"""
self.conditions = [x for x in self.conditions if x.treeview_id != tree_id]
def selection_list(self):
"""
Return the :py:class:`~enrich2.selection.selection.Selection`
objects as a list.
Returns
-------
`list`
List of selection objects.
"""
selections = list()
for cnd in self.children:
selections.extend(cnd.children)
return selections
def validate(self):
"""
Calls validate on all child Conditions. Also checks the wild type
sequence status.
"""
# check the wild type sequences
if self.has_wt_sequence():
for child in self.selection_list()[1:]:
if self.selection_list()[0].wt != child.wt:
log_message(
logging_callback=logging.warning,
msg="Inconsistent wild type sequences",
extra={"oname": self.name},
)
break
for child in self.children:
child.validate()
def is_coding(self):
"""
Return ``True`` if all the
:py:class:`~enrich2.selection.selection.Selection` objects in the
:py:class:`~enrich2.experiment.experiment.Experiment`
count protein-coding variants, else
``False``.
Returns
-------
`bool`
`True` if all selections are coding.
"""
return all(x.is_coding() for x in self.selection_list())
def has_wt_sequence(self):
"""
Return ``True`` if all the
:py:class:`~enrich2.selection.selection.Selection` objects in the
:py:class:`~enrich2.experiment.experiment.Experiment`
have a wild type sequence, else
``False``.
Returns
-------
`bool`
`True` if all selections have a wildtype sequence.
"""
return all(x.has_wt_sequence() for x in self.selection_list())
def calculate(self):
"""
Calculate scores for all
:py:class:`~enrich2.selection.selection.Selection` objects, then
combine scores across selections within a condition to generate
score statistics.
"""
if len(self.labels) == 0:
raise ValueError(
"No data present across all conditions [{}]" "".format(self.name)
)
for s in self.selection_list():
s.calculate()
self.combine_barcode_maps()
for label in self.labels:
self.calc_counts(label)
if self.get_root().scorer_class.name != "Counts Only":
self.calc_shared_full(label)
self.calc_shared(label)
self.calc_scores(label)
if label != "barcodes":
self.calc_pvalues_wt(label)
def combine_barcode_maps(self):
"""
Combine all barcode maps for
:py:class:`~enrich2.selection.selection.Selection` objects
into a single data frame and store it in ``'/main/barcodemap'``.
If multiple variants or IDs map to the same barcode, only the first one
will be present in the barcode map table.
The ``'/main/barcodemap'`` table is not created if no
:py:class:`~enrich2.selection.selection.Selection`
has barcode map information.
"""
if self.check_store("/main/barcodemap"):
return
bcm = None
for sel in self.selection_list():
if "/main/barcodemap" in list(sel.store.keys()):
if bcm is None:
bcm = sel.store["/main/barcodemap"]
else:
bcm = bcm.join(
sel.store["/main/barcodemap"], rsuffix=".drop", how="outer"
)
new = bcm.loc[pd.isnull(bcm)["value"]]
bcm.loc[new.index, "value"] = new["value.drop"]
bcm.drop("value.drop", axis="columns", inplace=True)
if bcm is not None:
bcm.sort_values("value", inplace=True)
self.store.put("/main/barcodemap", bcm, data_columns=bcm.columns)
def calc_counts(self, label):
"""
Create a data frame of all counts in this Experiment. This data frame
is not used for any calculations, but is provided to facilitate
exploration of the data set.
Parameters
----------
label : `str` {'barcodes', 'variants', 'identifiers', 'synonymous'}
A valid table label.
See Also
--------
:py:mod:`~enrich2.base.constants`
"""
if self.check_store("/main/{}/counts".format(label)):
return
idx = pd.IndexSlice
# create columns multi-index
# has to be lex-sorted for multi-slicing to work
log_message(
logging_callback=logging.info,
msg="Creating column multi-index for counts ({})".format(label),
extra={"oname": self.name},
)
conditions_index = list()
selections_index = list()
values_index = list()
for cnd in self.children:
for sel in cnd.children:
conditions_index.extend([cnd.name] * len(sel.timepoints))
selections_index.extend([sel.name] * len(sel.timepoints))
values_index.extend(["c_{}".format(x) for x in sorted(sel.timepoints)])
columns = pd.MultiIndex.from_tuples(
list(zip(conditions_index, selections_index, values_index)),
names=["condition", "selection", "timepoint"],
)
# create union index
log_message(
logging_callback=logging.info,
msg="Creating row index for counts ({})".format(label),
extra={"oname": self.name},
)
combined = None
first = True
for s in self.selection_list():
if first:
combined = s.store.select(
key="/main/{}/counts_unfiltered".format(label), columns=["index"]
).index
first = False
else:
combined = combined.join(
s.store.select(
key="/main/{}/counts_unfiltered".format(label),
columns=["index"],
).index,
how="outer",
)
# create and fill the data frames
log_message(
logging_callback=logging.info,
msg="Populating Experiment data frame with " "counts ({})".format(label),
extra={"oname": self.name},
)
data = pd.DataFrame(index=combined, columns=columns)
for cnd in self.children:
for sel in cnd.children:
sel_data = sel.store.select(
key="/main/{}/counts_unfiltered".format(label)
)
for tp in sel.timepoints:
data.loc[:, idx[cnd.name, sel.name, "c_{}".format(tp)]] = sel_data[
"c_{}".format(tp)
]
self.store.put("/main/{}/counts".format(label), data)
def calc_shared_full(self, label):
"""
Use joins to create a data frame containing all scores across all
Selections in the Experiment.
Parameters
----------
label : `str` {'barcodes', 'variants', 'identifiers', 'synonymous'}
A valid table label.
See Also
--------
:py:mod:`~enrich2.base.constants`
"""
if self.check_store("/main/{}/scores_shared_full".format(label)):
return
idx = pd.IndexSlice
# create columns multi-index
# has to be lex-sorted for multi-slicing to work
log_message(
logging_callback=logging.info,
msg="Creating column multi-index for scores ({})".format(label),
extra={"oname": self.name},
)
conditions_index = list()
selections_index = list()
values_index = list()
if self.get_root().scorer_class.name == "Ratios (Old Enrich)":
values_list = ["score"]
else:
values_list = ["score", "SE"]
for cnd in self.children:
for sel in cnd.children:
conditions_index.extend([cnd.name] * len(values_list))
selections_index.extend([sel.name] * len(values_list))
values_index.extend(sorted(values_list))
columns = pd.MultiIndex.from_tuples(
list(zip(conditions_index, selections_index, values_index)),
names=["condition", "selection", "value"],
)
# create union index
log_message(
logging_callback=logging.info,
msg="Creating row index for scores ({})".format(label),
extra={"oname": self.name},
)
combined = None
first = True
for s in self.selection_list():
if first:
combined = s.store.select(
key="/main/{}/scores".format(label), columns=["index"]
).index
first = False
else:
combined = combined.join(
s.store.select(
key="/main/{}/scores".format(label), columns=["index"]
).index,
how="outer",
)
# create and fill the data frames
log_message(
logging_callback=logging.info,
msg="Populating Experiment data frame with " "scores ({})".format(label),
extra={"oname": self.name},
)
data = pd.DataFrame(index=combined, columns=columns)
for cnd in self.children:
for sel in cnd.children:
sel_data = sel.store.select("/main/{}/scores".format(label))
for v in values_list:
data.loc[:, idx[cnd.name, sel.name, v]] = sel_data[v]
self.store.put("/main/{}/scores_shared_full".format(label), data)
def calc_shared(self, label):
"""
Get the subset of scores that are shared across all Selections in each
Condition.
Parameters
----------
label : `str` {'barcodes', 'variants', 'identifiers', 'synonymous'}
A valid table label.
See Also
--------
:py:mod:`~enrich2.base.constants`
"""
if self.check_store("/main/{}/scores_shared".format(label)):
return
log_message(
logging_callback=logging.info,
msg="Identifying subset shared across all " "Selections ({})".format(label),
extra={"oname": self.name},
)
data = self.store.select("/main/{}/scores_shared_full".format(label))
# identify variants found in all selections in at least two conditions
idx = pd.IndexSlice
complete = np.full(len(data.index), False, dtype=bool)
for cnd in data.columns.levels[0]:
mask_score = (
data.loc[:, idx[cnd, :, "score"]].notnull().sum(axis="columns") >= 2
)
complete = np.logical_or(complete, mask_score)
# try:
# mask_se = (data.loc[:, idx[cnd, :, 'SE']].notnull().sum(
# axis='columns') >= 2
# )
# complete = np.logical_or(complete, mask_se)
# except KeyError:
# pass
data = data.loc[complete]
self.store.put("/main/{}/scores_shared".format(label), data)
def calc_scores(self, label):
"""
Combine the scores and standard errors within each condition.
Parameters
----------
label : `str` {'barcodes', 'variants', 'identifiers', 'synonymous'}
A valid table label.
See Also
--------
:py:mod:`~enrich2.base.constants`
"""
if self.check_store("/main/{}/scores".format(label)):
return
log_message(
logging_callback=logging.info,
msg="Calculating per-condition scores ({})".format(label),
extra={"oname": self.name},
)
# set up new data frame
shared_index = self.store.select(
key="/main/{}/scores_shared".format(label), columns=["index"]
).index
columns = pd.MultiIndex.from_product(
[sorted(self.child_names()), sorted(["score", "SE", "epsilon"])],
names=["condition", "value"],
)
data = | pd.DataFrame(np.nan, index=shared_index, columns=columns) | pandas.DataFrame |
#
# Analysis of the hvorg_movies
#
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import astropy.units as u
from sunpy.time import parse_time
import hvorg_style as hvos
plt.rc('text', usetex=True)
plt.rc('font', size=14)
figsize = (10, 5)
# Read in the data
directory = os.path.expanduser('~/Data/hvanalysis/derived')
# Image output location
img = hvos.img
# application
application = 'helioviewer.org'
# data product
data_product = 'screenshots'
# Type of data we are looking at
data_analyzed = '{:s} {:s}'.format(application, data_product)
data_type = '{:s}'.format(data_analyzed)
# Time difference
f = os.path.join(directory, 'hvorg_screenshot_time_difference_seconds.npy')
td = np.load(f) * u.s
topicality_subtitle = "{:s} = {:s} - {:s}".format(hvos.durations['tmtopicality'][0], hvos.dates['Tmrequest'], hvos.dates['Tsdate'])
# Screenshot request times
f = os.path.join(directory, "hvorg_screenshot_request_time.pkl")
screenshot_request_time = pickle.load(open(f, 'rb'))
# Number of screenshots
nmovies = len(td)
# Figure 1 : topicality
# Scale size we are interested in
topicality_unit = u.year
# Define the topicality on the scale size
topicality = td.to(topicality_unit).value
# Histogram bins
topicality_bins = 100
# make the plot
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hist(topicality, bins=topicality_bins)
ax.grid(True, linestyle='dotted')
plt.yscale('log')
plt.xlabel(hvos.qlabel(hvos.durations['tmtopicality'][1], hvos.durations['tmtopicality'][0], str(topicality_unit)))
plt.ylabel(hvos.mlabel(len(td), data_type=data_product))
plt.title('{{{:s}}}\n{{{:s}}}'.format(data_type, topicality_subtitle))
plt.tight_layout()
filename = hvos.overleaf(os.path.join(data_type, 'topicality'))
filename = '{:s}.{:s}'.format(filename, hvos.imgfiletype)
filepath = os.path.join(img, filename)
plt.savefig(filepath)
# Figure 2: topicality < 30 days
# Scale size we are interested in
td_short_unit = u.day
# Longest possible topicality
td_short_limit = 30*u.day
# Find the topicalities less than the longest possible
topicality = td.to(td_short_unit)
these = np.abs(topicality) < td_short_limit
topicality = topicality[these].value
# Histogram bins
topicality_bins = int(td_short_limit.to(td_short_unit).value*24)
# Fix the bin size
td_short_fraction = 24
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hist(topicality, bins=topicality_bins)
ax.grid(True, linestyle='dotted')
rl = hvos.relevant_lines(hvos.lines, tr=[0, 30]*u.day)
for key in list(rl.keys()):
kwargs = rl[key]
kwargs['label'] = str(key)
plt.axvline(key.to(td_short_unit).value, **kwargs)
plt.yscale('log')
plt.xlabel(hvos.qlabel(hvos.durations['tmtopicality'][1], hvos.durations['tmtopicality'][0], str(td_short_unit)))
plt.ylabel(hvos.mlabel(len(topicality), data_type=data_product))
plt.title('{{{:s}}}\n{{{:s}}} {{{:s}}} {{{:s}}}'.format(data_type, topicality_subtitle, "$\le$", td_short_limit))
plt.legend()
plt.tight_layout()
filename = hvos.overleaf(os.path.join(data_type, 'topicality_{:s}'.format(str(td_short_limit))))
filename = '{:s}.{:s}'.format(filename, hvos.imgfiletype)
filepath = os.path.join(img, filename)
plt.savefig(filepath)
# Figure 6
# Number of requests as a function of time
title = 'screenshots per quarter'
df = | pd.DataFrame(screenshot_request_time, columns=['date']) | pandas.DataFrame |
# *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
from numpy import npv
from math import log10
from reo.models import ErrorModel
import pandas as pd
import numpy as np
import calendar
import datetime
def slope(x1, y1, x2, y2):
return (y2 - y1) / (x2 - x1)
def intercept(x1, y1, x2, y2):
return y2 - slope(x1, y1, x2, y2) * x2
def annuity(analysis_period, rate_escalation, rate_discount):
"""
this formulation assumes cost growth in first period
i.e. it is a geometric sum of (1+rate_escalation)^n / (1+rate_discount)^n
for n = 1,..., analysis_period
"""
x = (1 + rate_escalation) / (1 + rate_discount)
if x != 1:
pwf = round(x * (1 - x ** analysis_period) / (1 - x), 5)
else:
pwf = analysis_period
return pwf
def degradation_factor(analysis_period, rate_degradation):
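# Average of the year-by-year output multipliers 1, (1-d), ..., (1-d)^(N-1) over an
# N-year analysis period, where d = rate_degradation (with special-cased returns
# for analysis_period 0 and 1).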
if analysis_period == 0:
return 0
if analysis_period == 1:
return 1 - rate_degradation
factor = 1
factors = [factor]
for yr in range(1, int(analysis_period)):
factor *= (1 - rate_degradation)
factors.append(factor)
return sum(factors)/analysis_period
def annuity_escalation(analysis_period, rate_escalation, rate_discount):
'''
:param analysis_period: years
:param rate_escalation: escalation rate
:param rate_discount: discount rate
:return: present worth factor with escalation (inflation, or degradation if negative)
NOTE: assume escalation/degradation starts in year 2
'''
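# Computes pwf = sum_{n=1}^{N} (1 + rate_escalation)^(n-1) / (1 + rate_discount)^n,
# i.e. the year-1 cash flow is not escalated (escalation starts in year 2, per the
# docstring note above).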
pwf = 0
for yr in range(1, int(analysis_period + 1)):
pwf += (1 + rate_escalation) ** (yr - 1) / (1 + rate_discount) ** yr
return pwf
def insert_u_bp(xp_array_incent, yp_array_incent, region, u_xbp, u_ybp, p, u_cap):
xp_array_incent[region].append(u_xbp)
yp_array_incent[region].append(u_ybp - u_ybp * p + u_cap)
return xp_array_incent, yp_array_incent
def insert_p_bp(xp_array_incent, yp_array_incent, region, p_xbp, p_ybp, u, p_cap):
xp_array_incent[region].append(p_xbp)
yp_array_incent[region].append(p_ybp - (p_cap + p_xbp * u))
return xp_array_incent, yp_array_incent
def insert_u_after_p_bp(xp_array_incent, yp_array_incent, region, u_xbp, u_ybp, p, p_cap, u_cap):
xp_array_incent[region].append(u_xbp)
if p_cap == 0:
yp_array_incent[region].append(u_ybp - (p * u_ybp + u_cap))
else:
yp_array_incent[region].append(u_ybp - (p_cap + u_cap))
return xp_array_incent, yp_array_incent
def insert_p_after_u_bp(xp_array_incent, yp_array_incent, region, p_xbp, p_ybp, u, u_cap, p_cap):
xp_array_incent[region].append(p_xbp)
if u_cap == 0:
yp_array_incent[region].append(p_ybp - (p_cap + u * p_xbp))
else:
yp_array_incent[region].append(p_ybp - (p_cap + u_cap))
return xp_array_incent, yp_array_incent
def setup_capital_cost_incentive(itc_basis, replacement_cost, replacement_year,
discount_rate, tax_rate, itc,
macrs_schedule, macrs_bonus_pct, macrs_itc_reduction):
""" effective PV and battery prices with ITC and depreciation
(i) depreciation tax shields are inherently nominal --> no need to account for inflation
(ii) ITC and bonus depreciation are taken at end of year 1
(iii) battery replacement cost: one time capex in user defined year discounted back to t=0 with r_owner
(iv) Assume that cash incentives reduce ITC basis
(v) Assume cash incentives are not taxable, (don't affect tax savings from MACRS)
(vi) Cash incentives should be applied before this function into "itc_basis".
This includes all rebates and percentage-based incentives besides the ITC
"""
# itc reduces depreciable_basis
depr_basis = itc_basis * (1 - macrs_itc_reduction * itc)
# Bonus depreciation taken from tech cost after itc reduction ($/kW)
bonus_depreciation = depr_basis * macrs_bonus_pct
# Assume the ITC and bonus depreciation reduce the depreciable basis ($/kW)
depr_basis -= bonus_depreciation
# Calculate replacement cost, discounted to the replacement year accounting for tax deduction
replacement = replacement_cost * (1-tax_rate) / ((1 + discount_rate) ** replacement_year)
# Compute savings from depreciation and itc in array to capture NPV
tax_savings_array = [0]
for idx, macrs_rate in enumerate(macrs_schedule):
depreciation_amount = macrs_rate * depr_basis
if idx == 0:
depreciation_amount += bonus_depreciation
taxable_income = depreciation_amount
tax_savings_array.append(taxable_income * tax_rate)
# Add the ITC to the tax savings
tax_savings_array[1] += itc_basis * itc
# Compute the net present value of the tax savings
tax_savings = npv(discount_rate, tax_savings_array)
# Adjust cost curve to account for itc and depreciation savings ($/kW)
cap_cost_slope = itc_basis - tax_savings + replacement
# Sanity check
if cap_cost_slope < 0:
cap_cost_slope = 0
return round(cap_cost_slope, 4)
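def _setup_capital_cost_incentive_example():
    # Added usage sketch, not part of the original REopt source. The MACRS fractions are the
    # standard 5-year half-year-convention schedule; every other number is made up for
    # illustration. The result is an effective $/kW cost after the ITC, the depreciation tax
    # shields and the discounted replacement are netted against the basis.
    slope_per_kw = setup_capital_cost_incentive(
        itc_basis=1600.0,          # $/kW, already net of cash incentives
        replacement_cost=200.0,    # $/kW one-time replacement
        replacement_year=10,
        discount_rate=0.083,
        tax_rate=0.26,
        itc=0.26,
        macrs_schedule=[0.2, 0.32, 0.192, 0.1152, 0.1152, 0.0576],
        macrs_bonus_pct=0.0,
        macrs_itc_reduction=0.5,
    )
    # The incentives can only reduce the cost, and the floor in the function keeps it >= 0.
    assert 0 <= slope_per_kw < 1600.0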
def check_common_outputs(Test, d_calculated, d_expected):
"""
Used in tests to compare expected and API response values
:param Test: test class instance
:param d_calculated: dict of values from API (flat format)
:param d_expected: dict of expected values (flat format)
:return: None
"""
c = d_calculated
e = d_expected
try:
# check all calculated keys against the expected
for key, value in e.items():
tolerance = Test.REopt_tol
if key == 'npv':
tolerance = 2 * Test.REopt_tol
if key in c and key in e:
if (not isinstance(e[key], list) and isinstance(c[key], list)) or \
(isinstance(e[key], list) and not isinstance(c[key], list)):
Test.fail('Key: {0} expected type: {1} actual type {2}'.format(key, str(type(e[key])), str(type(c[key]))))
elif e[key] == 0:
Test.assertEqual(c[key], e[key], 'Key: {0} expected: {1} actual {2}'.format(key, str(e[key]), str(c[key])))
else:
if isinstance(e[key], float) or isinstance(e[key], int):
if key in ['batt_kw', 'batt_kwh']:
# variable rounding depends on scale of sizes
Test.assertAlmostEqual(c[key], e[key], -(int(log10(c[key]))))
else:
Test.assertTrue(abs((float(c[key]) - e[key]) / e[key]) < tolerance,
'Key: {0} expected: {1} actual {2}'.format(key, str(e[key]), str(c[key])))
else:
pass
else:
print("Warning: Expected value for {} not in calculated dictionary.".format(key))
if 'lcc_bau' in c and c['lcc_bau'] > 0:
# Total LCC BAU is sum of utility costs
Test.assertTrue(abs((float(c['lcc_bau'] or 0) - float(c['total_energy_cost_bau'] or 0) - float(c['total_min_charge_adder'] or 0)
- float(c['total_demand_cost_bau'] or 0) - float(c['existing_pv_om_cost_us_dollars'] or 0)
- float(c['total_fixed_cost_bau'] or 0)
- float(c['existing_gen_total_variable_om_cost_us_dollars'] or 0)
- float(c['existing_gen_total_fixed_om_cost_us_dollars'] or 0)
- float(c['existing_gen_total_fuel_cost_us_dollars'] or 0)
- float(c.get('total_boiler_fuel_cost_bau') or 0))
/ float(c['lcc_bau'] or 0)) < Test.REopt_tol,
"LCC_BAU doesn't add up to sum of individual costs")
except Exception as e:
print("check_common_outputs failed: {}".format(e.args[0]))
em = ErrorModel.objects.filter(run_uuid=c["run_uuid"]).first()
if em is not None:
raise Exception("""ErrorModel values:
task: \t {}
message: \t {}
traceback: \t {}
""".format(em.task, em.message, em.traceback)
)
else:
raise e
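# Added usage sketch (hypothetical class and variable names, not from the original source):
#   class TestPVModel(TestCase):
#       REopt_tol = 5e-3
#       def test_results(self):
#           check_common_outputs(self, d_calculated=flat_api_response, d_expected=expected_values)
# The helper compares every expected key against the response within REopt_tol and, if the
# comparison raises, surfaces any ErrorModel row stored for the response's run_uuid.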
def generate_year_profile_hourly(year, consecutive_periods):
"""
    This function creates a year-specific 8760 profile with 1.0 for the timesteps that fall inside the periods defined in consecutive_periods,
    based on generalized (non-year-specific) datetime inputs. All other values are 0.0. This function uses the numpy, pandas, datetime, and calendar packages.
:param year: year for applying consecutive_periods changes based on year and leap years (cut off 12/31/year)
:param consecutive_periods: either list of dictionaries where each dict defines a period (keys = "month", "start_week_of_month", "start_day_of_week", "start_hour", "duration_hours"; length N periods)
OR can be a Pandas DataFrame with columns equivalent to the dict keys in which case it gets converted to list_of_dict. All of the value types are integers.
:return year_profile_hourly_list: 8760 profile with 1.0 for timesteps defined in consecutive_periods, else 0.0.
:return start_day_of_month_list: list of start_day_of_month which is calculated in this function
:return errors_list: used in validators.py - errors related to the input consecutive_periods and the year's calendar
"""
errors_list = []
# Create datetime series of the year, remove last day of the year if leap year
if calendar.isleap(year):
end_date = "12/31/"+str(year)
else:
end_date = "1/1/"+str(year+1)
dt_profile = pd.date_range(start='1/1/'+str(year), end=end_date, freq="1H", closed="left")
year_profile_hourly_series = pd.Series(np.zeros(8760), index=dt_profile)
# Check if the consecutive_periods is a list_of_dict or other (must be Pandas DataFrame), and if other, convert to list_of_dict
if not isinstance(consecutive_periods, list):
consecutive_periods = consecutive_periods.to_dict('records')
day_of_week_name = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
start_day_of_month_list = []
for i in range(len(consecutive_periods)):
start_month = int(consecutive_periods[i]["month"]) # One-indexed both user input and Calendar package
start_week_of_month = int(consecutive_periods[i]["start_week_of_month"] - 1) # One-indexed for user, but zero-index for Calendar
start_day_of_week = int(consecutive_periods[i]["start_day_of_week"] - 1) # Monday - Sunday is 1 - 7 for user, 0 - 6 for Calendar
start_hour = int(consecutive_periods[i]["start_hour"] - 1) # One-indexed for user, datetime hour is zero-index
duration_hours = int(consecutive_periods[i]["duration_hours"])
error_start_text = "Error in chp_unavailability_period {}. ".format(i+1)
try:
start_day_of_month = calendar.Calendar().monthdayscalendar(year=year,month=start_month)[start_week_of_month][start_day_of_week] # One-indexed
start_day_of_month_list.append(start_day_of_month)
if start_day_of_month == 0: # This may happen if there is no day_of_week in the 1st, 5th or 6th week of the month
raise DayOfWeekError("There is no start_day_of_week {} ({}) in week {} of month {} in the year {}. Remember, Monday is treated as the first day of the week.".format(start_day_of_week+1, day_of_week_name[start_day_of_week], start_week_of_month+1, start_month, year))
else:
start_datetime = datetime.datetime(year=year, month=start_month, day=start_day_of_month, hour=start_hour)
if start_datetime + datetime.timedelta(hours=duration_hours-1) > dt_profile[-1]:
raise DurationOverflowsYearError("The start day/time and duration_hours exceeds the end of the year. Please specify two separate unavailability periods: one for the beginning of the year and one for up to the end of the year.")
else:
year_profile_hourly_series[start_datetime:start_datetime + datetime.timedelta(hours=duration_hours-1)] = 1.0
except DayOfWeekError as e:
errors_list.append(error_start_text + str(e.args[0]))
except DurationOverflowsYearError as e:
errors_list.append(error_start_text + str(e.args[0]))
except:
errors_list.append(error_start_text + "Invalid set for month {} (1-12), start_week_of_month {} (1-4, possible 5 and 6), start_day_of_week {} (1-7), and start_hour_of_day {} (1-24) for the year {}.".format(start_month, start_week_of_month+1, start_day_of_week+1, start_hour+1, year))
if errors_list == []:
year_profile_hourly_list = list(year_profile_hourly_series)
else:
year_profile_hourly_list = []
return year_profile_hourly_list, start_day_of_month_list, errors_list
class DayOfWeekError(Exception):
pass
class DurationOverflowsYearError(Exception):
pass
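def _generate_year_profile_hourly_example():
    # Added usage sketch, not part of the original REopt source. Marks a single 48-hour
    # unavailability window that begins in hour 8 of the day (07:00) on the Monday of
    # the second week of June 2019, i.e. June 3rd.
    periods = [{"month": 6, "start_week_of_month": 2, "start_day_of_week": 1,
                "start_hour": 8, "duration_hours": 48}]
    profile, start_days, errors = generate_year_profile_hourly(2019, periods)
    assert errors == []
    assert start_days == [3]
    assert len(profile) == 8760 and sum(profile) == 48.0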
def get_weekday_weekend_total_hours_by_month(year, year_profile_hourly_list):
"""
Get a summary of a yearly profile by calculating the weekday, weekend, and total hours by month (e.g. for chp_unavailability_periods viewing in the UI)
:param year for establishing the calendar
:param year_profile_hourly_list: list of 0's and 1's for tallying the metrics above; typically created using the generate_year_profile_hourly function
:return weekday_weekend_total_hours_by_month: nested dictionary with 12 keys (one for each month) each being a dictionary of weekday_hours, weekend_hours, and total_hours
"""
# Create datetime series of the year, remove last day of the year if leap year
if calendar.isleap(year):
end_date = "12/31/"+str(year)
else:
end_date = "1/1/"+str(year+1)
dt_profile = pd.date_range(start='1/1/'+str(year), end=end_date, freq="1H", closed="left")
    year_profile_hourly_series = pd.Series(year_profile_hourly_list, index=dt_profile)
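    # NOTE: the original source is truncated at this point; the lines below are a plausible
    # completion sketched from the docstring above, not the original implementation.
    weekday_weekend_total_hours_by_month = {}
    for month in range(1, 13):
        month_series = year_profile_hourly_series[year_profile_hourly_series.index.month == month]
        weekday_hours = float(month_series[month_series.index.dayofweek <= 4].sum())
        weekend_hours = float(month_series[month_series.index.dayofweek >= 5].sum())
        weekday_weekend_total_hours_by_month[month] = {
            "weekday_hours": weekday_hours,
            "weekend_hours": weekend_hours,
            "total_hours": weekday_hours + weekend_hours,
        }
    return weekday_weekend_total_hours_by_month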
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
        idx = date_range(
            "2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="US/Eastern"
        )
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
        # tz_convert doesn't affect the internal values
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
index = DatetimeIndex(times)
tz = "US/Eastern"
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz, nonexistent="raise")
result = index.tz_localize(tz=tz, nonexistent="NaT")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="infer")
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer"))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(
datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz
)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(
datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc
)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + "US/Eastern"
dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
dti2 = dti.tz_localize(tzstr)
dti_utc = pd.date_range(
start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc"
)
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(prefix + "US/Pacific")
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dti.tz_localize(tzstr)
dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"):
dti.tz_localize(tzstr)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_localize_utc_conversion(self, tz):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range("3/10/2012", "3/11/2012", freq="30T")
converted = rng.tz_localize(tz)
expected_naive = rng + pd.offsets.Hour(5)
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range("3/11/2012", "3/12/2012", freq="30T")
# Is this really how it should fail??
with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"):
rng.tz_localize(tz)
def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
        # note: this test checks that a tz-naive index can be localized
# and de-localized successfully, when there are no DST transitions
# in the range.
idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T")
tz = tz_aware_fixture
localized = idx.tz_localize(tz)
        # can't localize a tz-aware object
with pytest.raises(
TypeError, match="Already tz-aware, use tz_convert to convert"
):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset.tzinfo is None
expected = idx._with_freq(None)
tm.assert_index_equal(reset, expected)
def test_dti_tz_localize_naive(self):
rng = date_range("1/1/2011", periods=100, freq="H")
conv = rng.tz_localize("US/Pacific")
exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific")
tm.assert_index_equal(conv, exp._with_freq(None))
def test_dti_tz_localize_tzlocal(self):
# GH#13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start="2001-01-01", end="2001-03-01")
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_nat(self, tz):
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="NaT")
times = [
"11/06/2011 00:00",
np.NaN,
np.NaN,
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di_test = DatetimeIndex(times, tz="US/Eastern")
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_flags(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# Pass in flags to determine right dst transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
localized = di.tz_localize(tz, ambiguous=is_dst)
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool"))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where inferring the dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
msg = "Length of ambiguous bool-array must be the same size as vals"
with pytest.raises(Exception, match=msg):
di.tz_localize(tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# TODO: belongs outside tz_localize tests?
@pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"])
def test_dti_construction_ambiguous_endpoint(self, tz):
# construction with an ambiguous end-point
# GH#11626
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
date_range(
"2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H"
)
times = date_range(
"2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer"
)
assert times[0] == Timestamp("2013-10-26 23:00", tz=tz, freq="H")
assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H")
@pytest.mark.parametrize(
"tz, option, expected",
[
["US/Pacific", "shift_forward", "2019-03-10 03:00"],
["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"],
["US/Pacific", "shift_backward", "2019-03-10 01:00"],
["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"],
["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"],
],
)
def test_dti_construction_nonexistent_endpoint(self, tz, option, expected):
        # construction with a nonexistent end-point
with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"):
date_range(
"2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H"
)
times = date_range(
"2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option
)
assert times[-1] == Timestamp(expected, tz=tz, freq="H")
def test_dti_tz_localize_bdate_range(self):
dr = pd.bdate_range("1/1/2009", "1/1/2010")
dr_utc = pd.bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
@pytest.mark.parametrize(
"method, exp", [["NaT", pd.NaT], ["raise", None], ["foo", "invalid"]]
)
def test_dti_tz_localize_nonexistent(self, tz, method, exp):
# GH 8917
n = 60
dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
if method == "raise":
with pytest.raises(pytz.NonExistentTimeError, match="2015-03-29 02:00:00"):
dti.tz_localize(tz, nonexistent=method)
elif exp == "invalid":
msg = (
"The nonexistent argument must be one of "
"'raise', 'NaT', 'shift_forward', 'shift_backward' "
"or a timedelta object"
)
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=method)
else:
result = dti.tz_localize(tz, nonexistent=method)
expected = DatetimeIndex([exp] * n, tz=tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start_ts, tz, end_ts, shift",
[
["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:59:59.999999999",
"backward",
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 03:20:00",
timedelta(hours=1),
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:20:00",
timedelta(hours=-1),
],
["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:59:59.999999999",
"backward",
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 03:33:00",
timedelta(hours=1),
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:33:00",
timedelta(hours=-1),
],
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift(
self, start_ts, tz, end_ts, shift, tz_type
):
# GH 8917
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
dti = DatetimeIndex([Timestamp(start_ts)])
result = dti.tz_localize(tz, nonexistent=shift)
expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("offset", [-1, 1])
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
# GH 8917
tz = tz_type + "Europe/Warsaw"
dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")])
msg = "The provided timedelta will relocalize on a nonexistent time"
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
# -------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize_tz(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC")
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
@td.skip_if_windows
@pytest.mark.parametrize(
"timezone",
[
"US/Pacific",
"US/Eastern",
"UTC",
"Asia/Kolkata",
"Asia/Shanghai",
"Australia/Canberra",
],
)
def test_normalize_tz_local(self, timezone):
# GH#13459
with tm.set_timezone(timezone):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
expected = expected._with_freq(None)
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
# ------------------------------------------------------------
# DatetimeIndex.__new__
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_constructor_static_tzinfo(self, prefix):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST")
index.hour
index[0]
def test_dti_constructor_with_fixed_tz(self):
off = FixedOffset(420, "+07:00")
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
rng2 = date_range(start, periods=len(rng), tz=off)
tm.assert_index_equal(rng, rng2)
rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00")
assert (rng.values == rng3.values).all()
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_convert_datetime_list(self, tzstr):
dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo")
dr2 = DatetimeIndex(list(dr), name="foo", freq="D")
tm.assert_index_equal(dr, dr2)
def test_dti_construction_univalent(self):
rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern")
rng2 = DatetimeIndex(data=rng, tz="US/Eastern")
tm.assert_index_equal(rng, rng2)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_from_tzaware_datetime(self, tz):
d = [datetime(2012, 8, 19, tzinfo=tz)]
index = DatetimeIndex(d)
assert timezones.tz_compare(index.tz, tz)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_constructors(self, tzstr):
"""Test different DatetimeIndex constructions with timezone
Follow-up of GH#4229
"""
arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"]
idx1 = to_datetime(arr).tz_localize(tzstr)
idx2 = pd.date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr)
idx2 = idx2._with_freq(None) # the others all have freq=None
idx3 = DatetimeIndex(arr, tz=tzstr)
idx4 = DatetimeIndex(np.array(arr), tz=tzstr)
for other in [idx2, idx3, idx4]:
tm.assert_index_equal(idx1, other)
# -------------------------------------------------------------
# Unsorted
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_date_accessor(self, dtype):
# Regression test for GH#21230
expected = np.array([date(2018, 6, 4), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype)
result = index.date
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_time_accessor(self, dtype):
# Regression test for GH#21267
expected = np.array([time(10, 20, 30), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype)
result = index.time
tm.assert_numpy_array_equal(result, expected)
def test_timetz_accessor(self, tz_naive_fixture):
# GH21358
tz = timezones.maybe_get_tz(tz_naive_fixture)
expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz)
result = index.timetz
tm.assert_numpy_array_equal(result, expected)
def test_dti_drop_dont_lose_tz(self):
# GH#2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
assert ind.tz is not None
def test_dti_tz_conversion_freq(self, tz_naive_fixture):
# GH25241
t3 = DatetimeIndex(["2019-01-01 10:00"], freq="H")
assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq
t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="T")
assert t4.tz_convert(tz="UTC").freq == t4.freq
def test_drop_dst_boundary(self):
# see gh-18031
tz = "Europe/Brussels"
freq = "15min"
start = Timestamp("201710290100", tz=tz)
end = Timestamp("201710290300", tz=tz)
index = pd.date_range(start=start, end=end, freq=freq)
expected = DatetimeIndex(
[
"201710290115",
"201710290130",
"201710290145",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290300",
],
tz=tz,
freq=freq,
ambiguous=[
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
],
)
result = index.drop(index[0])
tm.assert_index_equal(result, expected)
def test_date_range_localize(self):
rng = date_range("3/11/2012 03:00", periods=15, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern")
rng3 = date_range("3/11/2012 03:00", periods=15, freq="H")
rng3 = rng3.tz_localize("US/Eastern")
tm.assert_index_equal(rng._with_freq(None), rng3)
# DST transition time
val = rng[0]
exp = Timestamp("3/11/2012 03:00", tz="US/Eastern")
assert val.hour == 3
assert exp.hour == 3
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range("3/11/2012 00:00", periods=2, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(
["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern", freq="H"
)
tm.assert_index_equal(rng, rng2)
        exp = Timestamp("3/11/2012 00:00", tz="US/Eastern")
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
pa = pytest.importorskip("pyarrow", minversion="1.0.1")
from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask
arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES]
arrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES]
arrays += [pd.array([True, False, True, None], dtype="boolean")]
@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays])
def data(request):
"""
Fixture returning parametrized array from given dtype, including integer,
float and boolean
"""
return request.param
def test_arrow_array(data):
arr = pa.array(data)
expected = pa.array(
data.to_numpy(object, na_value=None),
type=pa.from_numpy_dtype(data.dtype.numpy_dtype),
)
assert arr.equals(expected)
def test_arrow_roundtrip(data):
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
result = table.to_pandas()
assert result["a"].dtype == data.dtype
tm.assert_frame_equal(result, df)
def test_dataframe_from_arrow_types_mapper():
def types_mapper(arrow_type):
if pa.types.is_boolean(arrow_type):
return pd.BooleanDtype()
elif pa.types.is_integer(arrow_type):
return pd.Int64Dtype()
bools_array = pa.array([True, None, False], type=pa.bool_())
ints_array = pa.array([1, None, 2], type=pa.int64())
small_ints_array = pa.array([-1, 0, 7], type=pa.int8())
record_batch = pa.RecordBatch.from_arrays(
[bools_array, ints_array, small_ints_array], ["bools", "ints", "small_ints"]
)
result = record_batch.to_pandas(types_mapper=types_mapper)
bools = pd.Series([True, None, False], dtype="boolean")
ints = pd.Series([1, None, 2], dtype="Int64")
small_ints = pd.Series([-1, 0, 7], dtype="Int64")
expected = pd.DataFrame({"bools": bools, "ints": ints, "small_ints": small_ints})
    tm.assert_frame_equal(result, expected)
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
df = pd.read_csv('pg.csv', parse_dates=['ACTDATS','FPEDATS'])
df = pd.DataFrame(df)
# Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
'''All subclasses of DataframeNode'''
import itertools
import operator
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union # noqa: F401
import pandas as pd
from six.moves import reduce
from .bq_abstract_syntax_tree import (EMPTY_CONTEXT, EMPTY_NODE, # noqa: F401
AbstractSyntaxTreeNode, DataframeNode, DatasetType,
EvaluatableNode, EvaluationContext, Field,
MarkerSyntaxTreeNode, TableContext, _EmptyNode)
from .bq_types import (BQArray, BQStructType, BQType, TypedDataFrame, TypedSeries, # noqa: F401
implicitly_coerce)
from .evaluatable_node import Array, Selector, StarSelector, Value # noqa: F401
from .join import DataSource # noqa: F401
DEFAULT_TABLE_NAME = None
_OrderByType = List[Tuple[Field, str]]
_LimitType = Tuple[EvaluatableNode, EvaluatableNode]
class _WithTableContext(TableContext):
'''A TableContext augmented by a WITH clause.'''
def __init__(self, name, table, parent_context):
# type: (str, TypedDataFrame, TableContext) -> None
self.name = name
self.table = table
self.parent_context = parent_context
def lookup(self, path):
# type: (Tuple[str, ...]) -> Tuple[TypedDataFrame, Optional[str]]
'''Look up a path to a table in this context.'''
if len(path) == 1 and path[0] == self.name:
return self.table, self.name
if '.'.join(path) == self.name:
return self.table, path[-1]
return self.parent_context.lookup(path)
class QueryExpression(DataframeNode):
'''Highest level definition of a query.
https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#sql-syntax
(see query_expr)
'''
def __init__(self,
with_clauses, # type: Union[_EmptyNode, List[Tuple[str, DataframeNode]]]
base_query, # type: DataframeNode
order_by, # type: Union[_EmptyNode, _OrderByType]
limit, # type: Union[_EmptyNode, _LimitType]
):
# type: (...) -> None
'''Set up QueryExpression node.
Args:
with_clauses: Optional WITH expression
base_query: Main part of query
order_by: Expression by which to order results
limit: Number of rows to return, possibly with an offset
'''
self.with_clauses = with_clauses
self.base_query = base_query
self.order_by = order_by
self.limit = limit
def _order_by(self, order_by, typed_dataframe, table_name, table_context):
# type: (_OrderByType, TypedDataFrame, Optional[str], TableContext) -> TypedDataFrame
'''If ORDER BY is specified, sort the data by the given column(s)
in the given direction(s).
Args:
typed_dataframe: The currently resolved query as a TypedDataFrame
table_name: Resolved name of current typed_dataframe
table_context: A representation of the state of available tables
Returns:
A new TypedDataFrame that is ordered by the given criteria
'''
context = EvaluationContext(table_context)
context.add_table_from_dataframe(typed_dataframe, table_name, EMPTY_NODE)
# order_by is a list of (field, direction) tuples to sort by
fields = []
directions = [] # ascending = True, descending = False
for field, direction in order_by:
if isinstance(field, Field):
path = '.'.join(context.get_canonical_path(field.path))
fields.append(path)
elif isinstance(field, Value):
if not isinstance(field.value, int):
raise ValueError('Attempt to order by a literal non-integer constant {}'
.format(field.value))
index = field.value - 1 # order by 1 means the first field, i.e. index 0
fields.append(context.table.dataframe.columns[index])
else:
raise ValueError('Invalid field specification {}'.format(field))
if direction == 'DESC':
directions.append(False)
else:
# Default sort order in Standard SQL is ASC
directions.append(True)
return TypedDataFrame(
context.table.dataframe.sort_values(fields, ascending=directions),
context.table.types)
def _limit(self, limit, typed_dataframe):
# type: (_LimitType, TypedDataFrame) -> TypedDataFrame
'''If limit is specified, only return that many rows.
If offset is specified, start at that row number, not the first row.
Args:
typed_dataframe: The currently resolved query as a TypedDataFrame
Returns:
A new TypedDataFrame that conforms to the given limit and offset
'''
limit_expression, offset_expression = limit
# Use empty context because the limit is a constant
limit_value = limit_expression.evaluate(EMPTY_CONTEXT)
if not isinstance(limit_value, TypedSeries):
raise ValueError("invalid limit expression {}".format(limit_expression))
limit, = limit_value.series
if offset_expression is not EMPTY_NODE:
# Use empty context because the offset is also a constant
offset_value = offset_expression.evaluate(EMPTY_CONTEXT)
if not isinstance(offset_value, TypedSeries):
raise ValueError("invalid offset expression {}".format(offset_expression))
offset, = offset_value.series
else:
offset = 0
return TypedDataFrame(
typed_dataframe.dataframe[offset:limit + offset],
typed_dataframe.types)
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''See parent, DataframeNode'''
if not isinstance(self.with_clauses, _EmptyNode):
name_list = [name for name, _ in self.with_clauses]
if len(name_list) > len(set(name_list)):
raise ValueError("Duplicate names in WITH clauses are not allowed: {}"
.format(name_list))
for name, dataframe_node in self.with_clauses:
table_context = _WithTableContext(name,
dataframe_node.get_dataframe(table_context)[0],
table_context)
typed_dataframe, table_name = self.base_query.get_dataframe(table_context, outer_context)
if not isinstance(self.order_by, _EmptyNode):
typed_dataframe = self._order_by(
self.order_by, typed_dataframe, table_name, table_context)
if not isinstance(self.limit, _EmptyNode):
typed_dataframe = self._limit(self.limit, typed_dataframe)
return typed_dataframe, DEFAULT_TABLE_NAME
class SetOperation(DataframeNode):
'''Represents a set operation between two other query expressions - UNION, INTERSECT, etc.'''
def __init__(self, left_query, set_operator, right_query):
# type: (DataframeNode, str, DataframeNode) -> None
self.left_query = left_query
self.set_operator = set_operator
self.right_query = right_query
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''See parent, DataframeNode'''
left_dataframe, unused_left_name = self.left_query.get_dataframe(
table_context, outer_context)
right_dataframe, unused_right_name = self.right_query.get_dataframe(
table_context, outer_context)
num_left_columns = len(left_dataframe.types)
num_right_columns = len(right_dataframe.types)
if num_left_columns != num_right_columns:
raise ValueError("Queries in {} ALL have mismatched column count: {} vs {}"
.format(self.set_operator, num_left_columns, num_right_columns))
combined_types = [implicitly_coerce(left_type, right_type)
for left_type, right_type in zip(left_dataframe.types,
right_dataframe.types)]
if self.set_operator == 'UNION_ALL':
return TypedDataFrame(
pd.concat([left_dataframe.dataframe,
# Rename second table to use first table's column names
right_dataframe.dataframe.rename(
columns=dict(zip(right_dataframe.dataframe.columns,
left_dataframe.dataframe.columns)))]),
combined_types), DEFAULT_TABLE_NAME
else:
raise NotImplementedError("set operation {} not implemented".format(self.set_operator))
def _evaluate_fields_as_dataframe(fields, context):
# type: (Sequence[EvaluatableNode], EvaluationContext) -> TypedDataFrame
'''Evaluates a list of expressions and constructs a TypedDataFrame from the result.
Args:
fields: A list of expressions (evaluatable abstract syntax tree nodes)
context: The context to evaluate the expressions
Returns:
A TypedDataFrame consisting of the results of the evaluation.
'''
# Evaluates each of the given fields to get a list of tables and/or
# single columns
evaluated_fields = [field.evaluate(context) for field in fields]
# Creates one large table out of each of the evaluated field
# tables/columns
types = reduce(operator.add,
[field.types for field in evaluated_fields], []) # type: List[BQType]
combined_evaluated_data = (
pd.concat([field.dataframe for field in evaluated_fields], axis=1)
if evaluated_fields else pd.DataFrame([]))
return TypedDataFrame(combined_evaluated_data, types)
class Select(MarkerSyntaxTreeNode, DataframeNode):
'''SELECT query to retrieve rows from a table(s).
https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select-list
'''
def __init__(self, modifier, # type: AbstractSyntaxTreeNode
fields, # type: Sequence[Union[Selector, StarSelector]]
from_, # type: Union[_EmptyNode, DataSource]
where, # type: Union[_EmptyNode, EvaluatableNode]
group_by, # type: Union[_EmptyNode, List[Union[Value, Field]]]
having # type: Union[_EmptyNode, EvaluatableNode]
):
# type: (...) -> None
'''Set up SELECT node.
Args:
modifier: Optional ALL or DISTINCT modifier
fields: Columns to return
from_: Table/expression from which to retrieve rows
where: WHERE filter condition, if any
group_by: GROUP BY grouping condition, if any
having: HAVING filter condition, if any
'''
self.modifier = modifier
self.fields = fields
for i, field in enumerate(self.fields):
field.position = i + 1 # position is 1-up, i.e the first selector is position #1.
self.from_ = from_
self.where = where
if isinstance(group_by, _EmptyNode):
self.group_by = group_by # type: Union[_EmptyNode, List[Field]]
else:
self.group_by = []
for grouper in group_by:
if isinstance(grouper, Value):
if not isinstance(grouper.value, int):
raise ValueError('Attempt to group by a literal non-integer constant {}'
.format(grouper.value))
# GROUP BY 3 means group by the third field in the select, the field at index 2,
# i.e. we have to subtract one from the user-specified value to get the index.
# We construct a one-element field path just as if they'd specified the name
# of the corresponding field.
nth_field = self.fields[grouper.value - 1]
if not isinstance(nth_field, Selector):
raise ValueError('cannot GROUP BY {}th selector'.format(grouper.value))
grouper_path = (nth_field.name(),)
self.group_by.append(Field(grouper_path))
else:
self.group_by.append(grouper)
self.having = having
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''Scope the given datasets by the criteria specified in the
instance's fields.
Args:
table_context: All the tables in the database
outer_context: The context of the outer query, if this Select is a subquery;
otherwise None
Returns:
Tuple of the resulting table (TypedDataFrame) and a name for
this table
'''
if isinstance(self.from_, _EmptyNode):
context = EvaluationContext(table_context)
else:
context = self.from_.create_context(table_context)
if outer_context:
context.add_subcontext(outer_context)
# Expand out any * fields so that we have a list just of selectors.
expanded_fields = list(itertools.chain(*[
[selector] if isinstance(selector, Selector)
else selector.get_selectors(context)
for selector in self.fields]))
context.selector_names = [
selector.name() for selector in self.fields if isinstance(selector, Selector)]
if not isinstance(self.where, _EmptyNode):
# Filter table by WHERE condition
rows_to_keep = self.where.evaluate(context)
if not isinstance(rows_to_keep, TypedSeries):
raise ValueError("Invalid WHERE expression {}".format(rows_to_keep))
context.table = TypedDataFrame(
context.table.dataframe.loc[rows_to_keep.series],
context.table.types)
if not isinstance(self.group_by, _EmptyNode):
fields_for_evaluation = context.do_group_by(
expanded_fields, self.group_by) # type: Sequence[EvaluatableNode]
else:
fields_for_evaluation = expanded_fields
result = _evaluate_fields_as_dataframe(fields_for_evaluation, context)
if not isinstance(self.having, _EmptyNode):
having_context = EvaluationContext(table_context)
having_context.add_table_from_dataframe(result, None, EMPTY_NODE)
having_context.add_subcontext(context)
having_context.group_by_paths = context.group_by_paths
having = self.having.mark_grouped_by(context.group_by_paths, having_context)
rows_to_keep = having.evaluate(having_context)
if not isinstance(rows_to_keep, TypedSeries):
raise ValueError("Invalid HAVING expression {}".format(rows_to_keep))
result = TypedDataFrame(result.dataframe.loc[rows_to_keep.series], result.types)
if self.modifier == 'DISTINCT':
result = TypedDataFrame(result.dataframe.drop_duplicates(), result.types)
return result, DEFAULT_TABLE_NAME
class TableReference(DataframeNode):
'''A table reference specified as Project.Dataset.Table (or possibly
Dataset.Table or just Table if there is only one project and/or dataset).
'''
def __init__(self, path):
# type: (Tuple[str, ...]) -> None
# If the table reference is specified with backticks, it will be parsed
# as one element, so we need to split into parts here.
if len(path) == 1:
split_path = path[0].split('.') # type: List[str]
path = tuple(split_path)
self.path = path # type: Tuple[str, ...]
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''See parent, DataframeNode'''
del outer_context # Unused
return table_context.lookup(self.path)
class Unnest(DataframeNode, MarkerSyntaxTreeNode):
'''An expression unnesting an array into a column of data.'''
def __init__(self, array_node):
# type: (Array) -> None
self.array_node = array_node
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''See parent, DataframeNode'''
del outer_context # Unused
context = EvaluationContext(table_context)
result = self.array_node.evaluate(context)
if isinstance(result, TypedDataFrame):
raise ValueError('UNNEST({}) did not result in one column'.format(self.array_node))
result_type, = result.types
if not isinstance(result_type, BQArray):
raise ValueError("UNNESTing a non-array-typed value: {}".format(result_type))
contained_type = result_type.type_
if len(result.series) != 1:
raise ValueError('UNNEST({}) did not result in one row'.format(self.array_node))
result_array, = result.to_list()
if not isinstance(result_array, tuple):
raise ValueError("UNNEST({}) resulted in {!r} rather than an array"
.format(self.array_node, result_array))
if isinstance(contained_type, BQStructType):
i = 0
columns = []
for field in contained_type.fields:
if field:
columns.append(field)
else:
columns.append('f{}_'.format(i))
i += 1
result_dataframe = TypedDataFrame(
pd.DataFrame([[cell for cell in row] for row in result_array], columns=columns),
contained_type.types)
else:
result_dataframe = TypedDataFrame(
                pd.DataFrame([[cell] for cell in result_array], columns=['f0_'])
import copy
import itertools
import os
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.preprocessing import PowerTransformer
from scipy.stats import yeojohnson
from tqdm import tqdm
import tensorflow as tf
import warnings
warnings.simplefilter("ignore")
n_wavelengths = 55
n_timesteps = 300
class read_Ariel_dataset():
def __init__(self, noisy_path_train, noisy_path_test, params_path, start_read):
"""
For reading Ariel Dataset.
:param noisy_path_train: (str) The *relative path's parent directory* from the current
working directory to all noisy training files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
        :param noisy_path_test: (str) The *relative path's parent directory* from the current
working directory to all noisy test files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param params_path: (str) The *relative path's parent directory* from the current
working directory to all params files. For local files start with "./", for
colab files alternatively start with "/content/" (and "./" works fine).
:param start_read: (int) How many data points to replace at the beginning of the
file. Used for preprocessing of files by replacing values before start_read
with 1.0 to minimize impact of the drop valley.
"""
super().__init__()
self.noisy_path = noisy_path_train
self.noisy_path_test = noisy_path_test
self.params_path = params_path
self.start_read = start_read
# list all files in path(s).
        self.noisy_list = os.listdir(self.noisy_path)
self.noisy_list_test = os.listdir(self.noisy_path_test)
self.params_list = os.listdir(self.params_path)
# Grouped by AAAA:
self.group_noisy_list = self._group_list(self.noisy_list)
self.group_noisy_list_test = self._group_list(self.noisy_list_test)
self.group_params_list = self._group_list(self.params_list)
def _group_list_return(self):
"""
Only used for unit test purposes.
Return self.group_noisy_list and assert it is true.
"""
return self.group_noisy_list
def _choose_train_or_test(self, folder="noisy_train", batch_size=1):
"""Private function to choose train or test.
        :param batch_size (int): The batch size to take. Not implemented yet.
"""
if folder == "noisy_train":
path = self.noisy_path
files = self.noisy_list
elif folder == "noisy_test":
path = self.noisy_path_test
files = self.noisy_list_test
else:
raise FileNotFoundError("Not in the list (noisy_train, noisy_test). "
"Please input the choices in the list stated and try again.")
return path, files
def _len_noisy_list(self):
return len(self.noisy_list)
def unoptimized_read_noisy(self, folder="noisy_train", **kwargs):
"""
        Read noisy files greedily and concatenate them column-wise.
        Each file contributes 300 timestep columns over 55 wavelength rows, so
        reading 3 files yields a 55 x 900 dataframe.
:param folder (str): Which folder to do baseline transition. Choices:
"noisy_train" (default), "noisy_test".
"""
path, files = self._choose_train_or_test(folder=folder, **kwargs)
predefined = pd.DataFrame()
for item in files:
# Concatenate filename and their parent folder.
relative_file_path = path + "/" + item
# Renaming the columns
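            # Assuming filenames like "0001_01_01.txt", the 10-character stem
            # prefixes each of the 300 timestep columns, e.g. "0001_01_01_0"
            # through "0001_01_01_299".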
names = [item[-14:-4] + f"_{i}" for i in range(n_timesteps)]
curr = pd.read_csv(relative_file_path, delimiter="\t", skiprows=6, header=None)
curr.rename(columns={x: y for x, y in zip(curr.columns, names)}, inplace=True)
# Concatenating the pandas.
predefined = pd.concat([predefined, curr], axis=1)
return predefined
def unoptimized_read_params(self):
"""
Read params files greedily, stacking them on the first axis.
"""
predefined = pd.DataFrame()
for item in self.params_list:
# Relative file path:
relative_file_path = self.params_path + "/" + item
names = [item[-14:-4]] # Have to be a list to take effect
curr = pd.read_csv(relative_file_path, delimiter="\t", skiprows=2, header=None).T
curr.rename(columns = {x: y for x, y in zip(curr.columns, names)}, inplace=True)
predefined = pd.concat([predefined, curr], axis=1)
return predefined
def _group_list(self, mylist):
"""
        Group a list of filenames into sublists by their leading AAAA prefix
        (first four characters), to keep memory use and dataframe I/O manageable.
"""
return [list(v) for i, v in itertools.groupby(mylist, lambda x: x[:4])]
def read_noisy_extra_param(self, folder="train", saveto="./feature_store/noisy_train"):
"""
Read the extra 6 stellar and planet parameters in noisy files.
:param folder (str): "train" or "test" choice. Default "train" for noisy train set.
:param saveto (str): The directory to save to. Will make the directory if not
already exists.
"""
header = ["star_temp", "star_logg", "star_rad", "star_mass", "star_k_mag", "period"]
predefined = pd.DataFrame()
if saveto[-1] != "/":
saveto += "/"
try:
os.makedirs(saveto)
except OSError as e:
pass
if folder == "train":
path = self.noisy_path
mylist = self.group_noisy_list
elif folder == "test":
path = self.noisy_path_test
mylist = self.group_noisy_list_test
else:
raise ValueError("Invalid 'folder' entry. Please choose between 'train' or 'test'.")
        # To keep memory use manageable, process the files group by group.
for grouped_item in tqdm(mylist):
for item in grouped_item:
temp_storage_float = []
relative_file_path = path + "/" + item
with open(relative_file_path, "r") as f:
temp_storage_str = list(itertools.islice(f, 6))
# Preprocess for numbers only
for string in temp_storage_str:
# Separate the digits and the non-digits.
new_str = ["".join(x) for _, x in itertools.groupby(string, key=str.isdigit)]
# Only new_str[0] is the one we want to omit.
                    # We join the remaining pieces back into a single string because "."
                    # was classified as a non-digit.
new_str = "".join(new_str[1:])
# Convert to float.
temp_storage_float.append(float(new_str))
# Convert to pandas DataFrame.
temp_storage_float = pd.DataFrame(temp_storage_float)
# Define file name
names = [item[-14:-4]]
# Change the column name
temp_storage_float.rename(columns =
{x: y for x, y in zip(temp_storage_float.columns, names)},
inplace=True
)
# Change the row names for predefined (optional for readability)
temp_storage_float.rename(index = {x: y for x, y in zip(range(6), header)},
inplace=True)
                predefined = pd.concat([predefined, temp_storage_float], axis=1)
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
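# fixed_off is a UTC-07:00 offset; fixed_off_no_name is an unnamed UTC-05:30 offset.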
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect to internal
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
index = DatetimeIndex(times)
tz = "US/Eastern"
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz, nonexistent="raise")
result = index.tz_localize(tz=tz, nonexistent="NaT")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="infer")
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer"))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(
datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz
)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(
datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc
)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
        tm.assert_numpy_array_equal(conv.values, fromdates.values)
# Library for final plots used in the paper
# Created on: Jan 7, 2021
# Author: <NAME>
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from os.path import join
import numpy as np
from scipy.stats import norm, stats, spearmanr
import pylab as pl
import matplotlib.ticker as mtick
import math
import matplotlib as mpl
from datetime import datetime, timezone
from dateutil import tz
from scipy.stats import zscore
from matplotlib import ticker
from scipy.signal import find_peaks
from matplotlib.lines import Line2D
mpl.use("pgf")
text_size = 14
plt.rcParams.update({'font.size': text_size})
plt.rc('xtick',labelsize=text_size)
plt.rc('ytick',labelsize=text_size)
preamble = [r'\usepackage{fontspec}',
r'\usepackage{physics}']
params = {'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'xelatex',
'pgf.preamble': preamble}
mpl.rcParams.update(params)
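# With the pgf backend and these rcParams, matplotlib hands all text off to
# XeLaTeX, so figure fonts and math match the paper's typesetting.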
def plot_run_spread_temporally(path_to_cluster_info, save_dir):
'''
Plots the temporal behavior of runs in the clusters.
Parameters
----------
path_to_cluster_info: string
        Path to csv file with clustering temporal run information.
    save_dir: string
        Path to the directory to save the plot in.
Returns
-------
None
'''
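    # Example usage (paths are hypothetical):
    #   plot_run_spread_temporally("./results/temporal_cluster_info.csv", "./figures")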
df = pd.read_csv(path_to_cluster_info, index_col=0)
range = []
for n in df['Total Time']:
if(n<86400):
range.append('<1d')
elif(n<259200):
range.append('1-\n3d')
elif(n<604800):
range.append('3d-\n1w')
elif(n<(2592000/2)):
range.append('1w-\n2w')
elif(n<2592000):
range.append('2w-\n1M')
elif(n<7776000):
range.append('1-\n3M')
elif(n<15552000):
range.append('3-\n6M')
else:
print("don't forget: %d"%n)
df['Range'] = range
read_df = df[df['Operation']=='Read']
write_df = df[df['Operation']=='Write']
rm = np.median(read_df[read_df['Range']=='1w-\n2w']['Temporal Coefficient of Variation'])
wm = np.median(write_df[write_df['Range']=='1w-\n2w']['Temporal Coefficient of Variation'])
print('Median for read at 1-2w: %.3f'%rm)
print('Median for write at 1-2w: %.3f'%wm)
# Barplot of time periods to temporal CoV
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,1.8])
fig.subplots_adjust(left=0.19, right=0.990, top=0.96, bottom=0.48, wspace=0.03)
order = ['<1d', '1-\n3d', '3d-\n1w', '1w-\n2w', '2w-\n1M', '1-\n3M', '3-\n6M']
PROPS = {'boxprops':{'facecolor':'skyblue', 'edgecolor':'black'}, 'medianprops':{'color':'black'},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[0], x='Range', y='Temporal Coefficient of Variation', data=read_df, order=order, color='skyblue', fliersize=0, **PROPS)
PROPS = {'boxprops':{'facecolor':'maroon', 'edgecolor':'black'}, 'medianprops':{'color':'white', 'linewidth': 1.25},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[1], x='Range', y='Temporal Coefficient of Variation', data=write_df, order=order,color='maroon', fliersize=0, **PROPS)
# iterate over boxes
for i,box in enumerate(axes[0].artists):
box.set_edgecolor('black')
axes[0].set_ylabel('')
axes[1].set_ylabel('')
fig.text(0.005, 0.45, 'Inter-arrival\nTimes CoV', rotation=90)
axes[0].set_xlabel('')
axes[1].set_xlabel('')
fig.text(0.38, 0.13, '(a) Read', ha='center')
fig.text(0.80, 0.13, '(b) Write', ha='center')
fig.text(0.58, 0.03, 'Cluster Time Span', ha='center')
#fig.text(0.001, 0.65, "Performance\nCoV (%)", rotation=90, va='center', multialignment='center')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].set_yticks([0,1000,2000,3000])
axes[0].set_ylim(0,3000)
    plt.savefig(join(save_dir, 'time_period_v_temp_cov.pdf'))
plt.close()
plt.clf()
def plot_run_spread_span_frequency(path_to_cluster_info, save_dir):
'''
    Plots CDFs of the clusters' time spans and run frequencies (runs per day).
Parameters
----------
path_to_cluster_info: string
Path to csv file with clustering temporal run information.
Returns
-------
None
'''
df = pd.read_csv(path_to_cluster_info, index_col=0)
read_df = df[df['Operation']=='Read']
write_df = df[df['Operation']=='Write']
# CDF of time periods and frequency
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5, 2.2])
fig.subplots_adjust(left=0.15, right=0.965, top=.94, bottom=0.34, wspace=0.05)
read_info = read_df['Total Time']/86400
write_info = write_df['Total Time']/86400
read_median = np.median(read_info)
write_median = np.median(write_info)
read_info = np.log10(read_info)
write_info = np.log10(write_info)
read_median_plotting = np.median(read_info)
write_median_plotting = np.median(write_info)
read_bins = np.arange(0, int(math.ceil(max(read_info)))+1, 0.01)
hist = np.histogram(read_info, bins=read_bins)[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
write_bins = np.arange(0, int(math.ceil(max(write_info)))+1, 0.01)
hist = np.histogram(write_info, bins=write_bins)[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
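    # cdf_read[i] / cdf_write[i] give the fraction of clusters whose log10 time
    # span falls below bin i (cumulative histogram normalized to 1).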
print(cdf_write[100])
axes[0].plot(read_bins[:-1], cdf_read, color='skyblue', linewidth=2, label='Read')
axes[0].plot(write_bins[:-1], cdf_write, color='maroon', linewidth=2, label='Write')
axes[0].set_ylabel('CDF of Clusters')
axes[0].set_xlabel('(a) Cluster Time\nSpan (days)')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[0].set_ylim(0,1)
axes[0].set_xlim(0,3)
axes[0].set_yticks(np.arange(0,1.2,0.25))
positions = [1, 2, 3]
labels = ['$10^1$', '$10^2$', '$10^3$']
axes[0].xaxis.set_major_locator(ticker.FixedLocator(positions))
axes[0].xaxis.set_major_formatter(ticker.FixedFormatter(labels))
vals = axes[0].get_yticks()
axes[0].set_yticklabels(['{:,.0%}'.format(x) for x in vals])
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
axes[0].set_xticks(f_ticks, minor=True)
# Add vertical lines for medians
axes[0].axvline(np.log10(4), color='skyblue', zorder=0, linestyle='--', linewidth=2)
axes[0].axvline(write_median_plotting, color='maroon', zorder=0, linestyle=':', linewidth=2)
print("Median of Read: %f"%read_median)
print("Median of Write: %f"%write_median)
# Add legend
axes[0].legend(loc='lower right', fancybox=True)
read_info = read_df['Average Runs per Day'].tolist()
write_info = write_df['Average Runs per Day'].tolist()
read_median = np.median(read_info)
write_median = np.median(write_info)
read_info = np.log10(read_info)
write_info = np.log10(write_info)
read_bins = np.arange(0, int(math.ceil(max(read_info)))+1, 0.01)
hist = np.histogram(read_info, bins=read_bins)[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
write_bins = np.arange(0, int(math.ceil(max(write_info)))+1, 0.01)
hist = np.histogram(write_info, bins=write_bins)[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
axes[1].plot(read_bins[:-1], cdf_read, color='skyblue', linewidth=2, label='Read')
axes[1].plot(write_bins[:-1], cdf_write, color='maroon', linewidth=2, label='Write')
axes[1].set_xlabel('(b) Run Frequency\n(runs/day)')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].set_axisbelow(True)
axes[1].set_ylim(0,1)
axes[1].set_xlim(0,3)
axes[1].set_yticks(np.arange(0,1.2,0.25))
positions = [1, 2, 3]
labels = ['$10^1$', '$10^2$', '$10^3$']
axes[1].xaxis.set_major_locator(ticker.FixedLocator(positions))
axes[1].xaxis.set_major_formatter(ticker.FixedFormatter(labels))
vals = axes[0].get_yticks()
axes[1].set_yticklabels(['{:,.0%}'.format(x) for x in vals])
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
axes[1].set_xticks(f_ticks, minor=True)
# Add vertical lines for medians
axes[1].axvline(np.log10(read_median), color='skyblue', zorder=0, linestyle='--', linewidth=2)
axes[1].axvline(np.log10(write_median), color='maroon', zorder=0, linestyle=':', linewidth=2)
print("Median of Read: %f"%read_median)
print("Median of Write: %f"%write_median)
# Add legend
axes[0].legend(loc='lower right', fancybox=True)
#axes[1].get_legend().remove()
plt.savefig(join(save_dir, 'time_periods_freq.pdf'))
plt.close()
plt.clf()
return None
def plot_time_of_day_v_perf(path_to_data, save_dir):
'''
Plots time period effects on performance.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
range_tod = []
range_tow = []
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/Chicago')
for n in df['Start Time']:
datetime_time = datetime.fromtimestamp(n).replace(tzinfo=from_zone).astimezone(to_zone)
h = int(datetime_time.hour)
d = int(datetime_time.weekday())
# Group by time of day
'''
if(h == 0 or h == 1 or h == 2):
range_tod.append('12-\n3am')
elif(h == 3 or h == 4 or h == 5):
range_tod.append('3-\n6am')
elif(h == 6 or h == 7 or h == 8):
range_tod.append('6-\n9am')
elif(h == 9 or h == 10 or h == 11):
range_tod.append('9am-\n12pm')
elif(h == 12 or h == 13 or h == 14):
range_tod.append('12-\n3pm')
elif(h == 15 or h == 16 or h == 17):
range_tod.append('3-\n6pm')
elif(h == 18 or h == 19 or h == 20):
range_tod.append('6-\n9pm')
elif(h == 21 or h == 22 or h == 23):
range_tod.append('9pm-\n12am')
else:
print("don't forget: %d"%n)
'''
if(h == 0 or h == 1 or h == 2):
range_tod.append('0-\n3')
elif(h == 3 or h == 4 or h == 5):
range_tod.append('3-\n6')
elif(h == 6 or h == 7 or h == 8):
range_tod.append('6-\n9')
elif(h == 9 or h == 10 or h == 11):
range_tod.append('9-\n12')
elif(h == 12 or h == 13 or h == 14):
range_tod.append('12-\n15')
elif(h == 15 or h == 16 or h == 17):
range_tod.append('15-\n18')
elif(h == 18 or h == 19 or h == 20):
range_tod.append('18-\n21')
elif(h == 21 or h == 22 or h == 23):
range_tod.append('21-\n24')
else:
print("don't forget: %d"%n)
# Now for time of week
if(d == 0):
range_tow.append('Mo')
elif(d == 1):
range_tow.append('Tu')
elif(d == 2):
range_tow.append('We')
elif(d == 3):
range_tow.append('Th')
elif(d == 4):
range_tow.append('Fr')
elif(d == 5):
range_tow.append('Sa')
elif(d == 6):
range_tow.append('Su')
else:
print("don't forget: %d"%n)
df['Range, Time of Day'] = range_tod
df['Range, Time of Week'] = range_tow
# Rid of outliers to make cleaner plots
order = ['0-\n3', '3-\n6', '6-\n9', '9-\n12', '12-\n15', '15-\n18', '18-\n21', '21-\n24']
df_tod = pd.DataFrame(columns=['Range, Time of Day', 'Operation', 'Performance Z-Score'])
for tod in order:
working_df = df[df['Range, Time of Day']==tod].reset_index(drop=True)
working_df['Z-Score of Z-Scores'] = (working_df['Performance Z-Score'] - working_df['Performance Z-Score'].mean())/working_df['Performance Z-Score'].std(ddof=0)
working_df = working_df[working_df['Z-Score of Z-Scores'] < 2]
working_df = working_df[working_df['Z-Score of Z-Scores'] > -2]
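        # The two filters above trim outlier runs (|z| >= 2 among this bin's
        # z-scores) before plotting.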
working_df = working_df.drop(labels=['Application', 'Cluster Number', 'Start Time', 'Range, Time of Week', 'Z-Score of Z-Scores'], axis='columns')
df_tod = df_tod.append(working_df, ignore_index=True)
df_tow = pd.DataFrame(columns=['Range, Time of Week', 'Operation', 'Performance Z-Score'])
for tow in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']:
working_df = df[df['Range, Time of Week']==tow].reset_index(drop=True)
working_df['Z-Score of Z-Scores'] = (working_df['Performance Z-Score'] - working_df['Performance Z-Score'].mean())/working_df['Performance Z-Score'].std(ddof=0)
working_df = working_df[working_df['Z-Score of Z-Scores'] < 2]
working_df = working_df[working_df['Z-Score of Z-Scores'] > -2]
working_df = working_df.drop(labels=['Application', 'Cluster Number', 'Start Time', 'Range, Time of Day', 'Z-Score of Z-Scores'], axis='columns')
df_tow = df_tow.append(working_df, ignore_index=True)
# Barplot of time of day to performance CoV
read_df = df_tod[df_tod['Operation']=='Read']
write_df = df_tod[df_tod['Operation']=='Write']
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,2])
fig.subplots_adjust(left=0.16, right=0.990, top=0.96, bottom=0.45, wspace=0.03)
#order = ['12-\n3am', '3-\n6am', '6-\n9am', '9am-\n12pm', '12-\n3pm', '3-\n6pm', '6-\n9pm', '9pm-\n12am']
#PROPS = {'boxprops':{'facecolor':'skyblue', 'edgecolor':'black'}, 'medianprops':{'color':'black'},
# 'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.violinplot(ax=axes[0], x='Range, Time of Day', y='Performance Z-Score', data=read_df, order=order, color='skyblue', inner='quartile', linewidth=2)
sns.violinplot(ax=axes[1], x='Range, Time of Day', y='Performance Z-Score', data=write_df, order=order, color='maroon', inner='quartile', linewidth=2)
#violins = [art for art in axes[0].get_children()]
#for i in range(len(violins)):
# violins[i].set_edgecolor('black')
axes[0].set_ylabel('')
axes[1].set_ylabel('')
fig.text(0.37, 0.14, '(a) Read', ha='center')
fig.text(0.78, 0.14, '(b) Write', ha='center')
fig.text(0.58, 0.02, 'Time of Day (24-hr)', ha='center')
fig.text(0.001, 0.65, "Performance\nZ-Score", rotation=90, va='center', multialignment='center')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].set_ylim(-3,3)
axes[0].set_yticks(range(-3,4,1))
axes[0].tick_params(axis='x', labelsize=13)
axes[1].tick_params(axis='x', labelsize=13)
axes[0].tick_params(axis='y', labelsize=14)
axes[1].tick_params(axis='y', labelsize=14)
for l in axes[0].lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('black')
l.set_alpha(0.8)
for l in axes[0].lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
for l in axes[1].lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('white')
l.set_alpha(0.8)
for l in axes[1].lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
plt.savefig(join(save_dir, 'time_day_v_perf.pdf'))
plt.close()
plt.clf()
# Barplot of time of week to performance CoV
read_df = df_tow[df_tow['Operation']=='Read']
write_df = df_tow[df_tow['Operation']=='Write']
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,1.9])
fig.subplots_adjust(left=0.16, right=0.990, top=0.96, bottom=0.38, wspace=0.03)
order = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
sns.violinplot(ax=axes[0], x='Range, Time of Week', y='Performance Z-Score', data=read_df, order=order, color='skyblue', inner='quartile', edgecolor='black')
sns.violinplot(ax=axes[1], x='Range, Time of Week', y='Performance Z-Score', data=write_df, order=order, color='maroon', inner='quartile')
axes[0].set_ylabel('')
axes[1].set_ylabel('')
fig.text(0.37, 0.135, '(a) Read', ha='center')
fig.text(0.78, 0.135, '(b) Write', ha='center')
fig.text(0.58, 0.02, 'Day of Week', ha='center')
fig.text(0.001, 0.65, "Performance\nZ-Score", rotation=90, va='center', multialignment='center')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].set_ylim(-3,3)
axes[0].set_yticks(range(-3,4,1))
axes[0].tick_params(axis='x', labelsize=14)
axes[1].tick_params(axis='x', labelsize=14)
axes[0].tick_params(axis='y', labelsize=14)
axes[1].tick_params(axis='y', labelsize=14)
for l in axes[0].lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('black')
l.set_alpha(0.8)
for l in axes[0].lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
for l in axes[1].lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('white')
l.set_alpha(0.8)
for l in axes[1].lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
plt.savefig(join(save_dir, 'time_week_v_perf.pdf'), backend='pgf')
plt.close()
plt.clf()
return None
def plot_no_user_app_characterizations(path_to_data, path_to_normal_data, save_dir):
'''
Plots the cluster characterizations of clusters formed without being separated
by user and application.
Parameters
----------
path_to_data: string
Path to directory with data from clusters without user/app sorting.
path_to_normal_data: string
Path to directory with data from clusters with user/app sorting.
save_dir: string
Path to the directory to save the plots in.
Returns
-------
None
'''
# Plot CoV of cluster sizes
path = join(path_to_data, 'no_runs_in_clusters_read.txt')
with open(path, 'r') as f:
no_read_clusters = f.read().split("\n")
f.close()
no_read_clusters = pd.Series(no_read_clusters).astype(int)
no_read_clusters = no_read_clusters[no_read_clusters > 40]
path = join(path_to_data, 'no_runs_in_clusters_write.txt')
with open(path, 'r') as f:
no_write_clusters = f.read().split("\n")
f.close()
no_write_clusters = pd.Series(no_write_clusters).astype(int)
no_write_clusters = no_write_clusters[no_write_clusters > 40]
path = join(path_to_normal_data, 'no_runs_in_clusters_read.txt')
with open(path, 'r') as f:
no_read_clusters_o = f.read().split("\n")
f.close()
no_read_clusters_o = pd.Series(no_read_clusters_o).astype(int)
no_read_clusters_o = no_read_clusters_o[no_read_clusters_o > 40]
path = join(path_to_normal_data, 'no_runs_in_clusters_write.txt')
with open(path, 'r') as f:
no_write_clusters_o = f.read().split("\n")
f.close()
no_write_clusters_o = pd.Series(no_write_clusters_o).astype(int)
no_write_clusters_o = no_write_clusters_o[no_write_clusters_o > 40]
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[12,5])
fig.subplots_adjust(left=0.075, right=0.992, top=0.97, bottom=0.12, wspace=0.07)
n_bins = 10000
plt.setp(axes, xlim=(40,3000))
hist = np.histogram(no_read_clusters, bins=range(max(no_read_clusters)+1))[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
hist = np.histogram(no_write_clusters, bins=range(max(no_write_clusters)+1))[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
hist = np.histogram(no_read_clusters_o, bins=range(max(no_read_clusters_o)+1))[0]
cdf_read_o = np.cumsum(hist)
cdf_read_o = [x/cdf_read_o[-1] for x in cdf_read_o]
hist = np.histogram(no_write_clusters_o, bins=range(max(no_write_clusters_o)+1))[0]
cdf_write_o = np.cumsum(hist)
cdf_write_o = [x/cdf_write_o[-1] for x in cdf_write_o]
axes[0].plot(cdf_read, color='skyblue', label='False', linewidth=4)
axes[0].plot(cdf_read_o, color='mediumseagreen', label='True', linewidth=4, linestyle='--')
axes[1].plot(cdf_write, color='maroon', label='False', linewidth=4)
axes[1].plot(cdf_write_o, color='gold', label='True', linewidth=4, linestyle='--')
axes[0].set_ylim(0,1)
vals = axes[0].get_yticks()
axes[0].set_yticklabels(['{:,.0%}'.format(x) for x in vals])
axes[0].set_ylabel('Percent of Clusters')
axes[0].set_xlabel('Number of Runs in a Read Cluster')
axes[1].set_xlabel('Number of Runs in a Write Cluster')
axes[0].legend(title='Clustered by Application', loc='lower right')
axes[1].legend(title='Clustered by Application', loc='lower right')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
ticks = [40, 500, 1000, 1500, 2000, 2500, 3000]
axes[0].set_xticks(ticks)
axes[1].set_xticks(ticks)
plt.savefig(join(save_dir, 'cluster_sizes_no_user_app.pdf'))
plt.clf()
plt.close()
def plot_no_runs_v_no_clusters(path_to_data, save_dir):
'''
    Shows the correlation between the number of runs an application has and the
    number of clusters those runs form.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
df = df[df['Number of Runs']!=0]
fig, ax = plt.subplots(1, 1, figsize=[6,3])
fig.subplots_adjust(left=0.115, right=0.97, top=.95, bottom=0.21, wspace=0.03)
ax.set_ylim(-.1,3)
ax.set_xlim(1,5)
df['Number of Clusters'] = np.log10(df['Number of Clusters'])
df['Number of Runs'] = np.log10(df['Number of Runs'])
sns.regplot(data=df[df['Operation']=='Read'], x='Number of Runs', y='Number of Clusters', color='skyblue', ax=ax,
ci=None, order=0, label='Read', scatter_kws={'edgecolors':'black', 'zorder':1}, line_kws={'zorder':0})
sns.regplot(data=df[df['Operation']=='Write'], x='Number of Runs', y='Number of Clusters', color='maroon', ax=ax,
ci=None, order=0, label='Write', scatter_kws={'edgecolors':'black', 'zorder':1}, line_kws={'zorder':0})
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
ax.legend(loc='upper left', fancybox=True)
ax.set_ylabel('Number of Clusters')
ax.set_xlabel('Number of Runs of an Application')
positions = [0, 1, 2, 3]
labels = ['$10^0$', '$10^1$', '$10^2$', '$10^3$']
ax.yaxis.set_major_locator(ticker.FixedLocator(positions))
ax.yaxis.set_major_formatter(ticker.FixedFormatter(labels))
positions = [0, 1, 2, 3, 4 , 5]
labels = ['0', '$10^1$', '$10^2$', '$10^3$', '$10^4$', '$10^5$']
ax.xaxis.set_major_locator(ticker.FixedLocator(positions))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = [np.log10(x) for x in ticks]
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_yticks(f_ticks, minor=True)
f_ticks = []
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+3 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+4 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_xticks(f_ticks, minor=True)
plt.savefig(join(save_dir, 'no_runs_v_no_clusters_in_app.pdf'))
plt.clf()
plt.close()
def plot_cluster_sizes(path_to_data, save_dir):
'''
CDFs of read and write cluster sizes.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
df = df[df['Cluster Size']>40]
df['Cluster Size'] = np.log10(df['Cluster Size'])
read_info = df[df['Operation']=='Read']['Cluster Size'].tolist()
read_median = np.median(read_info)
read_75th = np.percentile(read_info, 75)
print("Median of Read: %d"%10**read_median)
print("75th Percentile of Read: %d"%10**read_75th)
read_mean = np.mean(read_info)
read_bins = np.arange(0, int(math.ceil(max(read_info)))+1, 0.01)
hist = np.histogram(read_info, bins=read_bins)[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
write_info = df[df['Operation']=='Write']['Cluster Size'].tolist()
write_median = np.median(write_info)
write_75th = np.percentile(write_info, 75)
print("Median of Write: %d"%10**write_median)
print("75th Percentile of Write: %d"%10**write_75th)
write_mean = np.mean(write_info)
write_bins = np.arange(0, int(math.ceil(max(write_info)))+1, 0.01)
hist = np.histogram(write_info, bins=write_bins)[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
# Get percentile info for each application
applications = df['Application'].unique().tolist()
for application in applications:
print("\n%s\n-------------"%application)
app_info = df[df['Application']==application]
median = np.median(app_info[app_info['Operation']=='Read']['Cluster Size'].tolist())
sfth = np.percentile(app_info[app_info['Operation']=='Read']['Cluster Size'].tolist(), 75)
print("Median of Read: %d"%10**median)
print("75th Percentile of Read: %d"%10**sfth)
median = np.median(app_info[app_info['Operation']=='Write']['Cluster Size'].tolist())
sfth = np.percentile(app_info[app_info['Operation']=='Write']['Cluster Size'].tolist(), 75)
print("Median of Write: %d"%10**median)
print("75th Percentile of Write: %d"%10**sfth)
fig, ax = plt.subplots(1, 1, sharey=True, figsize=[5, 2])
fig.subplots_adjust(left=0.30, right=0.85, top=.94, bottom=0.24, wspace=0.12)
ax.plot(read_bins[:-1], cdf_read, color='skyblue', linewidth=2, label='Read')
ax.plot(write_bins[:-1], cdf_write, color='maroon', linewidth=2, label='Write')
ax.set_ylim(0,1)
ax.set_yticks(np.arange(0,1.2,0.25))
vals = ax.get_yticks()
ax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
ax.set_ylabel('CDF of Clusters')
ax.set_xlabel('Number of Runs')
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
ax.set_xlim(np.log10(40),4)
positions = [2, 3, 4 ]
labels = ['$10^2$', '$10^3$', '$10^4$']
ax.xaxis.set_major_locator(ticker.FixedLocator(positions))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ticks = [1,2,3,4,5,6,7,8,9]
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+3 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_xticks(f_ticks, minor=True)
# Add vertical lines for medians
ax.axvline(read_median, color='skyblue', zorder=0, linestyle='--', linewidth=2)
ax.axvline(write_median, color='maroon', zorder=0, linestyle=':', linewidth=2)
# Add legend
ax.legend(loc='lower right', fancybox=True)
plt.savefig(join(save_dir, 'no_runs_in_clusters.pdf'))
plt.clf()
plt.close()
def plot_size_amount_v_perf_cov(path_to_data, save_dir):
'''
Plots boxplots to show how I/O amount affects performance variation.
Parameters
----------
path_to_data: string
Path to directory containing data to plot.
save_dir: string
Path to the directory to save the plots in.
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
range = []
for n in df['Average I/O Amount (bytes)']:
if(100000<n<100000000):
range.append('<100M')
elif(100000000<n<500000000):
range.append('100M-\n500M')
elif(500000000<n<1000000000):
range.append('500M-\n1G')
elif(1000000000<n<1500000000):
range.append('1G-\n1.5G')
elif(1500000000<n):
range.append('1.5G+')
else:
print("don't forget: %d"%n)
df['Range'] = range
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,2])
fig.subplots_adjust(left=0.12, right=0.990, top=0.96, bottom=0.41, wspace=0.03)
df_read = df[df['Operation']=='Read']
df_write = df[df['Operation']=='Write']
m = np.median(df_read[df_read['Range']=='<100M']['Performance CoV (%)'])
print('Read median, <100mb: %.3f'%m)
m = np.median(df_read[df_read['Range']=='1.5G+']['Performance CoV (%)'])
print('Read median, >1.5G: %.3f'%m)
m = np.median(df_write[df_write['Range']=='<100M']['Performance CoV (%)'])
print('Write median, <100mb: %.3f'%m)
m = np.median(df_write[df_write['Range']=='1.5G+']['Performance CoV (%)'])
print('Write median, >1.5G: %.3f'%m)
order = ['<100M', '100M-\n500M', '500M-\n1G', '1G-\n1.5G', '1.5G+']
PROPS = {'boxprops':{'facecolor':'skyblue', 'edgecolor':'black'}, 'medianprops':{'color':'black'},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[0], x='Range', y='Performance CoV (%)', data=df_read, order=order, color='skyblue', fliersize=0, **PROPS)
PROPS = {'boxprops':{'facecolor':'maroon', 'edgecolor':'black'}, 'medianprops':{'color':'white', 'linewidth': 1.25},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[1], x='Range', y='Performance CoV (%)', data=df_write, order=order,color='maroon', fliersize=0, **PROPS)
axes[0].set_ylabel('')
axes[1].set_ylabel('')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
fig.text(0.33, 0.13, '(a) Read', ha='center')
fig.text(0.77, 0.13, '(b) Write', ha='center')
fig.text(0.58, 0.02, 'I/O Amount (bytes)', ha='center')
fig.text(0.001, 0.55, "Performance CoV (%)", rotation=90, va='center', multialignment='center')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].set_ylim(0,50)
axes[0].tick_params(axis='x', labelsize=10)
axes[1].tick_params(axis='x', labelsize=10)
plt.savefig(join(save_dir, 'info_amount.pdf'))
plt.close()
plt.clf()
def plot_perf_v_no_run(path_to_data, save_dir):
'''
    Plots the effect of cluster size (number of runs) on performance variation.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
range = []
for n in df['Number of Runs']:
# 40-60, 60-100, 100-200, 200-500, 500-1000, 1000+
if(n<60):
range.append('40-\n60')
elif(n<100):
range.append('60-\n100')
elif(n<200):
range.append('100-\n200')
elif(n<500):
range.append('200-\n500')
elif(n<1000):
range.append('500-\n1000')
elif(n>=1000):
range.append('1000+')
else:
print("don't forget: %d"%n)
df['Range'] = range
df_read = df[df['Operation']=='Read']
df_write = df[df['Operation']=='Write']
corr_read = df_read['Performance CoV (%)'].corr(df_read['Number of Runs'])
corr_write = df_write['Performance CoV (%)'].corr(df_write['Number of Runs'])
print('Spearman correlation of read: %f'%corr_read)
print('Spearman correlation of write: %f'%corr_write)
# Barplot of time periods to performance CoV
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,2])
fig.subplots_adjust(left=0.11, right=0.990, top=0.96, bottom=0.42, wspace=0.03)
order = ['40-\n60', '60-\n100', '100-\n200', '200-\n500', '500-\n1000', '1000+']
PROPS = {'boxprops':{'facecolor':'skyblue', 'edgecolor':'black'}, 'medianprops':{'color':'black'},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[0], x='Range', y='Performance CoV (%)', data=df_read, order=order, color='skyblue', fliersize=0, **PROPS)
PROPS = {'boxprops':{'facecolor':'maroon', 'edgecolor':'black'}, 'medianprops':{'color':'white', 'linewidth': 1.25},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[1], x='Range', y='Performance CoV (%)', data=df_write, order=order,color='maroon', fliersize=0, **PROPS)
axes[0].set_ylabel('')
axes[1].set_ylabel('')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
fig.text(0.33, 0.12, '(a) Read', ha='center')
fig.text(0.77, 0.12, '(b) Write', ha='center')
fig.text(0.55, 0.02, 'Cluster Size', ha='center')
fig.text(0.001, 0.55, "Performance CoV (%)", rotation=90, va='center', multialignment='center')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].tick_params(axis='x', labelsize=11)
axes[1].tick_params(axis='x', labelsize=11)
axes[0].set_ylim(0,50)
plt.savefig(join(save_dir, 'no_runs_v_perf_cov.pdf'))
plt.close()
plt.clf()
return None
def plot_cluster_covs(path_to_data, save_dir):
'''
CDFs of read and write cluster CoVs.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
df['Performance CoV (%)'] = np.log10(df['Performance CoV (%)'])
read_info = df[df['Operation']=='Read']['Performance CoV (%)'].tolist()
read_median = np.median(read_info)
print("Median of Read: %d"%10**read_median)
read_mean = np.mean(read_info)
read_bins = np.arange(0, int(math.ceil(max(read_info)))+1, 0.01)
hist = np.histogram(read_info, bins=read_bins)[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
write_info = df[df['Operation']=='Write']['Performance CoV (%)'].tolist()
write_median = np.median(write_info)
print("Median of Write: %d"%10**write_median)
write_mean = np.mean(write_info)
write_bins = np.arange(0, int(math.ceil(max(write_info)))+1, 0.01)
hist = np.histogram(write_info, bins=write_bins)[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
fig, ax = plt.subplots(1, 1, sharey=True, figsize=[5, 2])
fig.subplots_adjust(left=0.28, right=0.80, top=.94, bottom=0.26, wspace=0.12)
ax.plot(read_bins[:-1], cdf_read, color='skyblue', linewidth=2, label='Read')
ax.plot(write_bins[:-1], cdf_write, color='maroon', linewidth=2, label='Write')
ax.set_ylim(0,1)
ax.set_yticks(np.arange(0,1.2,0.25))
vals = ax.get_yticks()
ax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
ax.set_ylabel('CDF of Clusters')
ax.set_xlabel('Performance CoV (%)')
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
ax.set_xlim(0, 2)
positions = [0,1,2]
labels = ['$10^0$', '$10^1$', '$10^2$']
ax.xaxis.set_major_locator(ticker.FixedLocator(positions))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_xticks(f_ticks, minor=True)
# Add vertical lines for medians
ax.axvline(read_median, color='skyblue', zorder=0, linestyle='--', linewidth=2)
ax.axvline(write_median, color='maroon', zorder=0, linestyle=':', linewidth=2)
# Add legend
ax.legend(loc='lower right', fancybox=True)
plt.savefig(join(save_dir, 'covs_cluster.pdf'))
plt.clf()
plt.close()
def plot_no_unique_behaviors(path_to_cluster_info, save_dir=''):
'''
    Barplot of the number of clusters per application (and overall),
    split into read and write clusters.
Parameters
----------
path_to_cluster_info: string
Path to csv file with clustering information.
Returns
-------
    None
'''
df = pd.read_csv(path_to_cluster_info, index_col=0)
#df = cluster_info.groupby(['Application', 'Operation'])['Cluster Size'].nunique().reset_index(name='Number of Clusters')
df = df[df['Cluster Size']>40]
no_read_clusters = df[df['Operation']=='Read']['Application'].value_counts().rename('Number of Clusters').reset_index()
no_write_clusters = df[df['Operation']=='Write']['Application'].value_counts().rename('Number of Clusters').reset_index()
no_read_clusters['Operation'] = 'Read'
no_write_clusters['Operation'] = 'Write'
df = no_read_clusters.append(no_write_clusters, ignore_index=True)
df['Application'] = df['index']
df = df.drop('index', axis=1)
d = {'Number of Clusters': int(no_read_clusters['Number of Clusters'].sum()), 'Operation': 'Read', 'Application': 'Overall'}
df = df.append(d, ignore_index=True)
d = {'Number of Clusters': int(no_write_clusters['Number of Clusters'].sum()), 'Operation': 'Write', 'Application': 'Overall'}
df = df.append(d, ignore_index=True)
df['Number of Clusters'] = np.log10(df['Number of Clusters'])
print(df)
fig, ax = plt.subplots(1, 1, figsize=[5, 2])
fig.subplots_adjust(left=0.12, right=0.99, top=.945, bottom=0.45, wspace=0.25)
order = ['Overall', 'vasp_gam_406746', 'mosst_dynamo.x_410575', 'pw.x_415566', 'pw.x_416364', 'vasp54withoutneb_397009', 'pw.x_381413', 'SpEC_383751', 'ideal.exe_309432', 'wrf.exe_309432', 'pp.x_381413']
labels = ['Overall', 'vasp0', 'mosst0', 'QE0', 'QE1', 'vasp1', 'QE2', 'spec0', 'wrf0', 'wrf1', 'QE3']
rects = sns.barplot(data=df, x='Application', y='Number of Clusters', hue='Operation', ax=ax, edgecolor='black', linewidth=2, palette={'Read': 'skyblue', 'Write': 'maroon'}, order=order)
plt.setp(ax.artists, alpha=1, linewidth=2, fill=False, edgecolor="black")
# Add number to bars
'''
for p in ax.patches:
a = (10**float(p.get_height()))
a = str(int(a))
ax.annotate(a, (p.get_x() + p.get_width() / 2., p.get_height()-0.04),
ha = 'center', va = 'center', xytext = (0, 9), textcoords = 'offset points', fontsize=8)
'''
# Fix x-axis labels
new_labels = ['%s\n%s'%('_'.join(x.get_text().split('_')[0:-1]), x.get_text().split('_')[-1]) for x in ax.get_xticklabels()]
# Labels for axes
ax.set_xticklabels(labels, rotation=90)
ax.set_xlabel('')
ax.set_ylabel('')
fig.text(0.50, 0.06, 'Application', ha='center', va='center')
fig.text(0.003, 0.20, 'Number of Clusters', rotation=90)
# Fix y-axis labels
ax.set_ylim(0,3)
positions = [0, 1, 2, 3, 4]
labels = ['$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$']
ax.yaxis.set_major_locator(ticker.FixedLocator(positions))
ax.yaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
#ticks = [1,2,3,4,]
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_yticks(f_ticks, minor=True)
# Add grid
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
# Add legend
ax.legend(loc='upper right', fancybox=True)
plt.savefig(join(save_dir, 'no_clusters_in_applications.pdf'))
plt.clf()
plt.close()
def plot_cluster_size_percentiles(path_to_data, save_dir):
'''
Barplot of read and write cluster size percentiles.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
results = pd.DataFrame(columns=['Application', 'Operation', 'Percentile', 'Number of Runs'])
df = df[df['Cluster Size']>40]
df['Cluster Size'] = np.log10(df['Cluster Size'])
read_info = df[df['Operation']=='Read']['Cluster Size'].tolist()
read_median = np.median(read_info)
read_75th = np.percentile(read_info, 75)
print("Median of Read: %d"%10**read_median)
d = {'Application': 'Overall', 'Operation': 'Read', 'Percentile': 50, 'Number of Runs': 10**read_median}
results = results.append(d, ignore_index=True)
print("75th Percentile of Read: %d"%10**read_75th)
d = {'Application': 'Overall', 'Operation': 'Read', 'Percentile': 75, 'Number of Runs': 10**read_75th}
results = results.append(d, ignore_index=True)
write_info = df[df['Operation']=='Write']['Cluster Size'].tolist()
write_median = np.median(write_info)
write_75th = np.percentile(write_info, 75)
print("Median of Write: %d"%10**write_median)
d = {'Application': 'Overall', 'Operation': 'Write', 'Percentile': 50, 'Number of Runs': int(10**write_median)}
results = results.append(d, ignore_index=True)
print("75th Percentile of Write: %d"%10**write_75th)
d = {'Application': 'Overall', 'Operation': 'Write', 'Percentile': 75, 'Number of Runs': int(10**write_75th)}
results = results.append(d, ignore_index=True)
# Get percentile info for each application
applications = df['Application'].unique().tolist()
for application in applications:
print("\n%s\n-------------"%application)
app_info = df[df['Application']==application]
median = np.median(app_info[app_info['Operation']=='Read']['Cluster Size'].tolist())
sfth = np.percentile(app_info[app_info['Operation']=='Read']['Cluster Size'].tolist(), 75)
print("Median of Read: %d"%10**median)
d = {'Application': application, 'Operation': 'Read', 'Percentile': 50, 'Number of Runs': int(10**median)}
results = results.append(d, ignore_index=True)
print("75th Percentile of Read: %d"%10**sfth)
d = {'Application': application, 'Operation': 'Read', 'Percentile': 75, 'Number of Runs': int(10**sfth)}
results = results.append(d, ignore_index=True)
median = np.median(app_info[app_info['Operation']=='Write']['Cluster Size'].tolist())
sfth = np.percentile(app_info[app_info['Operation']=='Write']['Cluster Size'].tolist(), 75)
print("Median of Write: %d"%10**median)
d = {'Application': application, 'Operation': 'Write', 'Percentile': 50, 'Number of Runs': int(10**median)}
results = results.append(d, ignore_index=True)
print("75th Percentile of Write: %d"%10**sfth)
d = {'Application': application, 'Operation': 'Write', 'Percentile': 75, 'Number of Runs': int(10**sfth)}
results = results.append(d, ignore_index=True)
print(results)
medians = results[results['Percentile']==50].copy()  # copy so the log10 transform below does not trigger SettingWithCopyWarning
#medians = medians.sort_values('Number of Runs')
medians['Number of Runs'] = np.log10(medians['Number of Runs'])
print(medians)
fig, ax = plt.subplots(1, 1, figsize=[5, 2])
fig.subplots_adjust(left=0.15, right=0.99, top=.945, bottom=0.45, wspace=0.25)
order = ['vasp_gam_406746', 'mosst_dynamo.x_410575', 'pw.x_415566', 'pw.x_416364', 'vasp54withoutneb_397009', 'pw.x_381413', 'SpEC_383751', 'ideal.exe_309432', 'wrf.exe_309432', 'pp.x_381413']
labels = ['vasp0', 'mosst0', 'QE0', 'QE1', 'vasp1', 'QE2', 'spec0', 'wrf0', 'wrf1', 'QE3']
rects = sns.barplot(data=medians, x='Application', y='Number of Runs', hue='Operation', ax=ax, edgecolor='black', linewidth=2, palette={'Read': 'skyblue', 'Write': 'maroon'}, order=order)
plt.setp(ax.artists, alpha=1, linewidth=2, fill=False, edgecolor="black")
# Add number to bars
'''
for p in ax.patches:
a = (10**float(p.get_height()))
a = '%d'%a
ax.annotate(a, (p.get_x() + p.get_width() / 2., p.get_height()),
ha = 'center', va = 'center', xytext = (0, 9), textcoords = 'offset points')
'''
# Fix x-axis labels
#new_labels = ['%s\n%s'%('_'.join(x.get_text().split('_')[0:-1]), x.get_text().split('_')[-1]) for x in ax.get_xticklabels()]
# Labels for axes
ax.set_xticklabels(labels, rotation=90)
ax.set_xlabel('')
ax.set_ylabel('')
fig.text(0.50, 0.05, 'Application', ha='center', va='center')
fig.text(0.04, 0.6, 'Median Number of\nRuns in Clusters', rotation=90, ha='center', va='center')
# Fix y-axis labels
ax.set_ylim(0,3)
positions = [0, 1, 2, 3, 4]
labels = ['$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$']
ax.yaxis.set_major_locator(ticker.FixedLocator(positions))
ax.yaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
#ticks = [1,2,3,4,]
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_yticks(f_ticks, minor=True)
# Add grid
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
# Add legend
ax.get_legend().remove()
ax.legend(loc=[0.02,0.05], fancybox=True, fontsize=12)
plt.savefig(join(save_dir, 'median_no_runs_in_clusters_by_application.pdf'))
plt.clf()
plt.close()
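# The row-by-row `DataFrame.append(d, ignore_index=True)` pattern used throughout
# these plotting helpers was deprecated in pandas 1.4 and removed in pandas 2.0.
# A minimal sketch of a replacement (hypothetical helper `append_rows`, not part of
# the original script), assuming the module-level `pandas as pd` import used here:
def append_rows(rows):
    '''Build a DataFrame from a list of row dicts instead of repeated .append calls.'''
    return pd.DataFrame(rows)
# To append a single row dict `d` to an existing frame, the equivalent is:
# results = pd.concat([results, pd.DataFrame([d])], ignore_index=True)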
def plot_cluster_cmp_perf_tod(path_to_data, E=0.10):
df = pd.read_csv(path_to_data, index_col=0)
fig, axes = plt.subplots(2, 1, sharex=True, figsize=[5, 3.3])
fig.subplots_adjust(left=0.14, right=0.965, top=.94, bottom=0.21, wspace=0.12)
range_tod = []
range_tow = []
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/Chicago')
order = ['0-\n3', '3-\n6', '6-\n9', '9-\n12', '12-\n15', '15-\n18', '18-\n21', '21-\n24']
for n in df['Start Time']:
datetime_time = datetime.fromtimestamp(n).replace(tzinfo=from_zone).astimezone(to_zone)
h = int(datetime_time.hour)
d = int(datetime_time.weekday())
# Group by time of day
if(h == 0 or h == 1 or h == 2):
range_tod.append('0-\n3')
elif(h == 3 or h == 4 or h == 5):
range_tod.append('3-\n6')
elif(h == 6 or h == 7 or h == 8):
range_tod.append('6-\n9')
elif(h == 9 or h == 10 or h == 11):
range_tod.append('9-\n12')
elif(h == 12 or h == 13 or h == 14):
range_tod.append('12-\n15')
elif(h == 15 or h == 16 or h == 17):
range_tod.append('15-\n18')
elif(h == 18 or h == 19 or h == 20):
range_tod.append('18-\n21')
elif(h == 21 or h == 22 or h == 23):
range_tod.append('21-\n24')
else:
print("don't forget: %d"%n)
df['Range, Time of Day'] = range_tod
# Read
# Find performance variation
operation = 'Read'
cluster_op_info = df[df['Operation']==operation]
applications = cluster_op_info['Application'].unique().tolist()
results = pd.DataFrame()
for application in applications:
clusters = cluster_op_info[cluster_op_info['Application']==application]
n = clusters['Cluster Number'].max()
clusters_start_time = clusters['Start Time'].min()
for i in range(0, n+1):
perf_covs = []
cluster = clusters[clusters['Cluster Number']==i]
cluster_size = cluster.shape[0]
if(cluster_size<40):
continue
perf_cov = stats.variation(cluster['Performance'])
for n in range(0,cluster_size):
perf_covs.append(perf_cov)
cluster['Performance CoV'] = perf_covs
results = results.append(cluster, ignore_index=True)
E_25 = int(results.shape[0]*E)
l = results.nsmallest(E_25, ['Performance CoV'])
l['Performance CoV Percentile'] = E*100
h = results.nlargest(E_25, ['Performance CoV'])
h['Performance CoV Percentile'] = 100-E*100
results = h.append(l, ignore_index=True)
#print(results)
TODs = results['Range, Time of Day'].unique().tolist()
plot = pd.DataFrame()
for TOD in TODs:
count = l[l['Range, Time of Day']==TOD].shape[0]
d = {'TOD': TOD, 'Count': int(count), 'Performance CoV Percentile': 'Bottom 10%'}
plot = plot.append(d, ignore_index=True)
count = h[h['Range, Time of Day']==TOD].shape[0]
d = {'TOD': TOD, 'Count': int(count), 'Performance CoV Percentile': 'Top 10%'}
plot = plot.append(d, ignore_index=True)
sns.barplot(ax=axes[0], data=plot, x='TOD', y='Count', hue='Performance CoV Percentile', edgecolor='black', linewidth=2, palette='Blues', order=order)
# Write
# Find performance variation
operation = 'Write'
cluster_op_info = df[df['Operation']==operation]
applications = cluster_op_info['Application'].unique().tolist()
results = pd.DataFrame()
for application in applications:
clusters = cluster_op_info[cluster_op_info['Application']==application]
n = clusters['Cluster Number'].max()
clusters_start_time = clusters['Start Time'].min()
for i in range(0, n+1):
perf_covs = []
cluster = clusters[clusters['Cluster Number']==i]
cluster_size = cluster.shape[0]
if(cluster_size<40):
continue
perf_cov = stats.variation(cluster['Performance'])
for n in range(0,cluster_size):
perf_covs.append(perf_cov)
cluster['Performance CoV'] = perf_covs
results = results.append(cluster, ignore_index=True)
E_25 = int(results.shape[0]*E)
l = results.nsmallest(E_25, ['Performance CoV'])
l['Performance CoV Percentile'] = E*100
h = results.nlargest(E_25, ['Performance CoV'])
h['Performance CoV Percentile'] = 100-E*100
results = h.append(l, ignore_index=True)
#print(results)
TODs = results['Range, Time of Day'].unique().tolist()
plot = pd.DataFrame()
for TOD in TODs:
count = l[l['Range, Time of Day']==TOD].shape[0]
d = {'TOD': TOD, 'Count': int(count), 'Performance CoV Percentile': 'Bottom 10%'}
plot = plot.append(d, ignore_index=True)
count = h[h['Range, Time of Day']==TOD].shape[0]
d = {'TOD': TOD, 'Count': int(count), 'Performance CoV Percentile': 'Top 10%'}
plot = plot.append(d, ignore_index=True)
sns.barplot(ax=axes[1], data=plot, x='TOD', y='Count', hue='Performance CoV Percentile', edgecolor='black', linewidth=2, palette='Reds', order=order)
# Plot aesthetics
axes[0].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
axes[1].tick_params(axis='x', labelrotation = 0)
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].set_axisbelow(True)
axes[0].set_ylim(0,2000)
axes[1].set_ylim(0,2000)
fig.text(0.55, 0.01, 'Time of Day (24-hr)', ha='center')
fig.text(0.001, 0.55, "Number of Runs", rotation=90, va='center')
fig.text(0.50, 0.95, 'Read')
fig.text(0.50, 0.55, 'Write')
axes[0].legend(loc=(0.10,0.43),fontsize=9, title='Perf CoV Percentile', title_fontsize=10)
axes[1].legend(loc=(0.60,0.01),fontsize=9, title='Perf CoV Percentile', title_fontsize=10)
axes[0].set_ylabel('')
axes[1].set_ylabel('')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
#axes[1].set_xticklabels(labels)
plt.savefig('./performance_cov_percentile_features_TOD.pdf')
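# The 3-hour time-of-day bucketing above can also be computed arithmetically rather
# than with a chain of if/elif tests. A minimal sketch (hypothetical helper, producing
# the same '0-\n3' ... '21-\n24' labels used by `order`):
def tod_bucket(hour):
    '''Map an hour in 0-23 to its 3-hour time-of-day bucket label.'''
    lo = (hour // 3) * 3
    return '%d-\n%d' % (lo, lo + 3)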
def plot_cluster_cmp_perf_dow(path_to_data, E=0.10):
df = pd.read_csv(path_to_data, index_col=0)
fig, axes = plt.subplots(2, 1, sharex=True, figsize=[5, 2.2])
fig.subplots_adjust(left=0.14, right=0.75, top=.90, bottom=0.20, hspace=0.35)
range_tod = []
range_tow = []
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/Chicago')
order = ['Mo','Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
for n in df['Start Time']:
datetime_time = datetime.fromtimestamp(n).replace(tzinfo=from_zone).astimezone(to_zone)
h = int(datetime_time.hour)
d = int(datetime_time.weekday())
# Now for time of week
if(d == 0):
range_tow.append('Mo')
elif(d == 1):
range_tow.append('Tu')
elif(d == 2):
range_tow.append('We')
elif(d == 3):
range_tow.append('Th')
elif(d == 4):
range_tow.append('Fr')
elif(d == 5):
range_tow.append('Sa')
elif(d == 6):
range_tow.append('Su')
else:
print("don't forget: %d"%n)
df['Range, Time of Week'] = range_tow
# Read
# Find performance variation
operation = 'Read'
cluster_op_info = df[df['Operation']==operation]
applications = cluster_op_info['Application'].unique().tolist()
results = pd.DataFrame()
for application in applications:
clusters = cluster_op_info[cluster_op_info['Application']==application]
n = clusters['Cluster Number'].max()
clusters_start_time = clusters['Start Time'].min()
for i in range(0, n+1):
perf_covs = []
cluster = clusters[clusters['Cluster Number']==i]
cluster_size = cluster.shape[0]
if(cluster_size<40):
continue
perf_cov = stats.variation(cluster['Performance'])
for n in range(0,cluster_size):
perf_covs.append(perf_cov)
cluster['Performance CoV'] = perf_covs
results = results.append(cluster, ignore_index=True)
E_25 = int(results.shape[0]*E)
l = results.nsmallest(E_25, ['Performance CoV'])
l['Performance CoV Percentile'] = E*100
h = results.nlargest(E_25, ['Performance CoV'])
h['Performance CoV Percentile'] = 100-E*100
results = h.append(l, ignore_index=True)
#print(results)
TODs = results['Range, Time of Week'].unique().tolist()
plot = pd.DataFrame()
for TOD in TODs:
count = l[l['Range, Time of Week']==TOD].shape[0]
d = {'DOW': TOD, 'Count': int(count), 'Performance CoV Percentile': 'Bottom 10%'}
plot = plot.append(d, ignore_index=True)
count = h[h['Range, Time of Week']==TOD].shape[0]
d = {'DOW': TOD, 'Count': int(count), 'Performance CoV Percentile': 'Top 10%'}
plot = plot.append(d, ignore_index=True)
print(plot)
sns.barplot(ax=axes[0], data=plot, x='DOW', y='Count', hue='Performance CoV Percentile', edgecolor='black', linewidth=2, palette='Blues', order=order)
# Write
# Find performance variation
operation = 'Write'
cluster_op_info = df[df['Operation']==operation]
applications = cluster_op_info['Application'].unique().tolist()
results = pd.DataFrame()
for application in applications:
clusters = cluster_op_info[cluster_op_info['Application']==application]
n = clusters['Cluster Number'].max()
clusters_start_time = clusters['Start Time'].min()
for i in range(0, n+1):
perf_covs = []
cluster = clusters[clusters['Cluster Number']==i]
cluster_size = cluster.shape[0]
if(cluster_size<40):
continue
perf_cov = stats.variation(cluster['Performance'])
for n in range(0,cluster_size):
perf_covs.append(perf_cov)
cluster['Performance CoV'] = perf_covs
results = results.append(cluster, ignore_index=True)
E_25 = int(results.shape[0]*E)
l = results.nsmallest(E_25, ['Performance CoV'])
l['Performance CoV Percentile'] = E*100
h = results.nlargest(E_25, ['Performance CoV'])
h['Performance CoV Percentile'] = 100-E*100
results = h.append(l, ignore_index=True)
#print(results)
TODs = results['Range, Time of Week'].unique().tolist()
plot = pd.DataFrame()
for TOD in TODs:
count = l[l['Range, Time of Week']==TOD].shape[0]
d = {'DOW': TOD, 'Count': int(count), 'Performance CoV Percentile': 'Bottom 10%'}
plot = plot.append(d, ignore_index=True)
count = h[h['Range, Time of Week']==TOD].shape[0]
d = {'DOW': TOD, 'Count': int(count), 'Performance CoV Percentile': 'Top 10%'}
plot = plot.append(d, ignore_index=True)
print(plot)
sns.barplot(ax=axes[1], data=plot, x='DOW', y='Count', hue='Performance CoV Percentile', edgecolor='black', linewidth=2, palette='Reds', order=order)
# Plot aesthetics
axes[0].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
axes[1].tick_params(axis='x', labelrotation = 0)
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].set_axisbelow(True)
axes[0].set_ylim(0,4000)
axes[1].set_ylim(0,4000)
axes[0].set_yticks([x for x in range(0,4001,2000)])
axes[1].set_yticks([x for x in range(0,4001,2000)])
fig.text(0.45, 0.02, 'Day of Week', ha='center')
fig.text(0.001, 0.54, "Number of Runs", rotation=90, va='center')
fig.text(0.35, 0.93, '(a) Read')
fig.text(0.35, 0.515, '(b) Write')
axes[0].legend(loc=(1.01,0.1),fontsize=8, title='Perf CoV Percentile', title_fontsize=8)
axes[1].legend(loc=(1.01,0.1),fontsize=8, title='Perf CoV Percentile', title_fontsize=8)
axes[0].set_ylabel('')
axes[1].set_ylabel('')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
#axes[1].set_xticklabels(labels)
plt.savefig('./performance_cov_percentile_features_DOW.pdf')
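# The nested loops above that attach a per-cluster 'Performance CoV' (and skip
# clusters with fewer than 40 runs) can be expressed with groupby/transform.
# A minimal sketch, assuming (Application, Cluster Number) uniquely identifies a
# cluster and that `stats` is the scipy.stats module already used above:
def add_perf_cov(op_df, min_size=40):
    '''Keep clusters with >= min_size runs and add a per-cluster Performance CoV column.'''
    sizes = op_df.groupby(['Application', 'Cluster Number'])['Performance'].transform('size')
    op_df = op_df[sizes >= min_size].copy()
    op_df['Performance CoV'] = (
        op_df.groupby(['Application', 'Cluster Number'])['Performance']
        .transform(stats.variation)
    )
    return op_df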
def plot_perf_v_temporal(path_to_data, save_dir):
'''
Boxplots of cluster performance CoV versus cluster time span, i.e. temporal
effects on performance, for reads and writes.
Parameters
----------
path_to_data: string
Path to csv file with per-cluster time-span and performance CoV information.
save_dir: string
Directory in which the figure is saved.
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
time_ranges = []  # avoid shadowing the built-in `range`
for n in df['Total Time']:
if(n<86400):
time_ranges.append('<1d')
elif(n<259200):
time_ranges.append('1-3d')
elif(n<604800):
time_ranges.append('3d-1w')
elif(n<(2592000/2)):  # 2592000/2 s = 15 days, i.e. half a month
time_ranges.append('1w-2w')
elif(n<2592000):
time_ranges.append('2w-1M')
elif(n<7776000):
time_ranges.append('1-3M')
elif(n<15552000):
time_ranges.append('3-6M')
else:
print("don't forget: %d"%n)
df['Range'] = time_ranges
read_df = df[df['Operation']=='Read']
write_df = df[df['Operation']=='Write']
range_labels = read_df['Range'].unique()
print("For Read:")
for range_label in range_labels:
print('Range Label, Number of Clusters: %s %d'%(range_label, len(read_df[read_df['Range']==range_label])))
range_labels = write_df['Range'].unique()
print('For Write')
for range_label in range_labels:
print('Range Label, Number of Clusters: %s %d'%(range_label, len(write_df[write_df['Range']==range_label])))
# Barplot of time periods to performance CoV
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,2])
fig.subplots_adjust(left=0.12, right=0.990, top=0.96, bottom=0.45, wspace=0.03)
order = ['<1d', '1-3d', '3d-1w', '1w-2w', '2w-1M', '1-3M', '3-6M']
labels = ['<1d', '1-\n3d', '3d-\n1w', '1w-\n2w', '2w-\n1M', '1-\n3M', '3-\n6M']
print(read_df)
PROPS = {'boxprops':{'facecolor':'skyblue', 'edgecolor':'black'}, 'medianprops':{'color':'black'},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[0], x='Range', y='Performance CoV (%)', data=read_df, order=order, fliersize=0, **PROPS)
PROPS = {'boxprops':{'facecolor':'maroon', 'edgecolor':'black'}, 'medianprops':{'color':'white'},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[1], x='Range', y='Performance CoV (%)', data=write_df, order=order, linewidth=1.2, fliersize=0, **PROPS)
axes[0].set_ylabel('')
axes[1].set_ylabel('')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
axes[0].set_xticklabels(labels)
axes[1].set_xticklabels(labels)
fig.text(0.37, 0.12, '(a) Read', ha='center')
fig.text(0.78, 0.12, '(b) Write', ha='center')
fig.text(0.58, 0.02, 'Cluster Time Span', ha='center')
fig.text(0.001, 0.55, "Performance CoV (%)", rotation=90, va='center', multialignment='center')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].set_ylim(0,50)
plt.savefig(join(save_dir, 'time_period_v_perf_cov.pdf'))
plt.close()
plt.clf()
return None
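# The chained if/elif bucketing of 'Total Time' above can be written with pd.cut.
# A minimal sketch using the same bin edges (in seconds) and labels; the '1w-2w'
# bucket ends at 2592000/2 s = 15 days, exactly as in the loop above, and durations
# of 6 months or more fall outside the bins (NaN), mirroring the unhandled else branch:
def time_span_buckets(total_time_s):
    '''Label a Series of durations (seconds) with the cluster time-span buckets.'''
    bins = [0, 86400, 259200, 604800, 1296000, 2592000, 7776000, 15552000]
    labels = ['<1d', '1-3d', '3d-1w', '1w-2w', '2w-1M', '1-3M', '3-6M']
    return pd.cut(total_time_s, bins=bins, labels=labels, right=False)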
def plot_cluster_covs_by_application(path_to_data, save_dir):
'''
CDFs of read and write cluster performance CoVs, one panel per application.
Parameters
----------
path_to_data: string
Path to csv file with per-cluster performance CoV information.
save_dir: string
Directory in which the figure is saved.
Returns
-------
None
'''
df_all = pd.read_csv(path_to_data, index_col=0)
applications = df_all['Application'].unique().tolist()
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True, figsize=[5, 3])
fig.subplots_adjust(left=0.15, right=0.97, top=.97, bottom=0.18, hspace=0.20, wspace=0.20)
i = 0
for application in applications:
print(application)
df = df_all[df_all['Application']==application].copy()  # copy so the log10 transform below does not trigger SettingWithCopyWarning
if(len(df)<10):
continue
df['Performance CoV (%)'] = np.log10(df['Performance CoV (%)'])
read_info = df[df['Operation']=='Read']['Performance CoV (%)'].tolist()
read_median = np.median(read_info)
print("Median of Read: %d"%10**read_median)
read_mean = np.mean(read_info)
read_bins = np.arange(0, int(math.ceil(max(read_info)))+1, 0.01)
hist = np.histogram(read_info, bins=read_bins)[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
write_info = df[df['Operation']=='Write']['Performance CoV (%)'].tolist()
write_median = np.median(write_info)
print("Median of Write: %d"%10**write_median)
write_mean = np.mean(write_info)
write_bins = np.arange(0, int(math.ceil(max(write_info)))+1, 0.01)
hist = np.histogram(write_info, bins=write_bins)[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
if(i==0):
a = 0
b = 0
elif(i==1):
a = 0
b = 1
elif(i==2):
a = 1
b = 0
elif(i==3):
a = 1
b = 1
axes[a][b].plot(read_bins[:-1], cdf_read, color='skyblue', linewidth=2)
axes[a][b].plot(write_bins[:-1], cdf_write, color='maroon', linewidth=2)
axes[a][b].set_ylim(0,1)
axes[a][b].set_yticks(np.arange(0,1.2,0.50))
vals = axes[a][b].get_yticks()
axes[a][b].set_yticklabels(['{:,.0%}'.format(x) for x in vals])
#axes[i].set_ylabel('Percent of Clusters')
#axes[i].set_xlabel('Performance CoV (%)')
axes[a][b].yaxis.grid(color='lightgrey', linestyle=':')
axes[a][b].set_axisbelow(True)
axes[a][b].set_xlim(0, 2)
positions = [0,1,2]
labels = ['$10^0$', '$10^1$', '$10^2$']
axes[a][b].xaxis.set_major_locator(ticker.FixedLocator(positions))
axes[a][b].xaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
axes[a][b].set_xticks(f_ticks, minor=True)
# Add vertical lines for medians
axes[a][b].axvline(read_median, color='skyblue', zorder=0, linestyle='--', linewidth=2)
axes[a][b].axvline(write_median, color='maroon', zorder=0, linestyle=':', linewidth=2)
i = i + 1
# Add legend
legend_elements = [Line2D([0], [0], color='skyblue', lw=2, label='Read', alpha=1), Line2D([0], [0], color='maroon', lw=2, label='Write', alpha=1)]
axes[0][0].legend(handles=legend_elements, loc='upper left', fancybox=True, fontsize=12)
fig.text(0.001, 0.56, 'CDF of Clusters', rotation=90, ha='left', va='center')
fig.text(0.56, 0.008, 'Performance CoV (%)', ha='center', va='bottom')
x0 = 0.603
x1 = 0.153
h = 0.21
s = 0.215
labels = ['QE1','QE0','mosst0','vasp0']
fig.text(x0, h, s=labels[3])
fig.text(x1, h, s=labels[2])
fig.text(x0, h+2*s, s=labels[1])
fig.text(x1, h+2*s, s=labels[0])
plt.savefig(join(save_dir, 'covs_cluster_%s.pdf'%'overall'))
plt.clf()
plt.close()
def plot_barplot_ex_no_overlaps_normalized(path_to_data, save_path):
# pw.x_416364, max: 8
df_all = pd.read_csv(path_to_data, index_col=0)
#applications = df_all['Application'].unique().tolist()
applications = ['pw.x_416364','pw.x_415566','mosst_dynamo.x_410575','vasp_gam_406746']
labels = ['%s\n%s'%('_'.join(x.split('_')[0:-1]), x.split('_')[-1]) for x in applications]
overall_results = pd.DataFrame(columns=['Operation', 'Range', 'Number of Clusters', 'Percent of Clusters'])
r_clusters = len(df_all[df_all['Operation']=='Read'])
w_clusters = len(df_all[df_all['Operation']=='Write'])
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True, figsize=[5, 3])
fig.subplots_adjust(left=0.15, right=0.99, top=.975, bottom=0.23, hspace=0.20, wspace=0.06)
a = 0
for application in applications:
df = df_all[df_all['Application']==application]
results = pd.DataFrame(columns=['Operation', 'Range', 'Number of Clusters', 'Percent of Clusters'])
range = []
ranges = ['0-\n20%', '20-\n40%', '40-\n60%', '60-\n80%', '80-\n100%']
operation = 'Read'
counts = [0, 0, 0, 0, 0, 0, 0]
n_clusters = len(df[df['Operation']==operation])
# The subplot row/column depend only on the application index `a`, so determine
# them once before looping over the clusters (otherwise f and g would stay
# undefined when an application has no clusters for this operation).
if a==0:
f=0
g=0
elif a==1:
f=0
g=1
elif a==2:
f=1
g=0
elif a==3:
f=1
g=1
for i in df[df['Operation']==operation]['Number of Overlaps']:
n = (i/n_clusters)*100
if(n<20):
counts[0] = counts[0] + 1
elif(n<40):
counts[1] = counts[1] + 1
elif(n<60):
counts[2] = counts[2] + 1
elif(n<80):
counts[3] = counts[3] + 1
elif(n<=100):
counts[4] = counts[4] + 1
else:
print("don't forget: %d"%n)
for i in np.arange(0,5):
d = {'Operation': operation, 'Range': ranges[i], 'Number of Clusters': counts[i], 'Percent of Clusters': (counts[i]/n_clusters)}
results = results.append(d, ignore_index=True)
d = {'Operation': operation, 'Range': ranges[i], 'Number of Clusters': counts[i], 'Percent of Clusters': (counts[i]/r_clusters)}
overall_results = overall_results.append(d, ignore_index=True)
operation = 'Write'
counts = [0, 0, 0, 0, 0, 0, 0]
n_clusters = len(df[df['Operation']==operation])
for i in df[df['Operation']==operation]['Number of Overlaps']:
n = (i/n_clusters)*100
if(n<20):
counts[0] = counts[0] + 1
elif(n<40):
counts[1] = counts[1] + 1
elif(n<60):
counts[2] = counts[2] + 1
elif(n<80):
counts[3] = counts[3] + 1
elif(n<=100):
counts[4] = counts[4] + 1
else:
print("don't forget: %d"%n)
#df['Range'] = range
for i in np.arange(0,5):
d = {'Operation': operation, 'Range': ranges[i], 'Number of Clusters': counts[i], 'Percent of Clusters': (counts[i]/n_clusters)}
results = results.append(d, ignore_index=True)
d = {'Operation': operation, 'Range': ranges[i], 'Number of Clusters': counts[i], 'Percent of Clusters': (counts[i]/w_clusters)}
overall_results = overall_results.append(d, ignore_index=True)
print(results)
sns.barplot(data=results, x='Range', y='Percent of Clusters', hue='Operation', ax=axes[f][g], edgecolor='black', linewidth=2, palette={'Read': 'skyblue', 'Write': 'maroon'}, order=ranges)
axes[f][g].yaxis.grid(color='lightgrey', linestyle=':')
axes[f][g].set_axisbelow(True)
axes[f][g].get_legend().remove()
axes[f][g].set_ylim(0,1)
vals = axes[f][g].get_yticks()
axes[f][g].set_yticklabels(['{:,.0%}'.format(x) for x in vals])
axes[f][g].set_xlabel('')
axes[f][g].set_ylabel('')
axes[f][g].set_yticks([0,.25,.50,.75,1.00],minor=True)
axes[f][g].yaxis.grid(color='lightgrey', linestyle=':', which='minor')
axes[f][g].set_axisbelow(True)
a = a + 1
axes[0][0].legend(loc='upper left', prop={'size': 10})
fig.text(0.001, 0.56, 'Percent of Clusters', rotation=90, ha='left', va='center')
fig.text(0.56, 0.005, 'Percent of Clusters Overlapped', ha='center', va='bottom')
x1 = 0.40
x0 = x1 + 0.45
h = 0.505
s = 0.206
labels = ['QE1','QE0','mosst0','vasp0']
fig.text(x0, h, s=labels[3])
fig.text(x1, h, s=labels[2])
fig.text(x0, h+2*s, s=labels[1])
fig.text(x1, h+2*s, s=labels[0])
plt.savefig(join(save_path, 'application_examples.pdf'))
plt.clf()
plt.close()
overall_results = overall_results.groupby(['Operation', 'Range']).sum().reset_index()
print(overall_results)
fig, ax = plt.subplots(1, 1, figsize=[5, 1.4])
fig.subplots_adjust(left=0.20, right=0.70, top=.945, bottom=0.47, wspace=0.25)
sns.barplot(data=overall_results, x='Range', y='Percent of Clusters', hue='Operation', ax=ax, edgecolor='black', linewidth=2, palette={'Read': 'skyblue', 'Write': 'maroon'}, order=ranges)
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
ax.get_legend().remove()
ax.legend(title='', loc=[1.02,0.01])
ax.set_ylim(0,1)
vals = ax.get_yticks()
ax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
#ax.legend('upper right', fancybox=True)
ax.set_xlabel('')
ax.set_ylabel('')
fig.text(0.46, 0.03, 'Percent of Clusters Overlapped', ha='center')
fig.text(0.05, 0.41, 'Percent of\nClusters', rotation=90, ha='center')
plt.savefig(join('./overall.pdf'))
plt.clf()
plt.close()
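# The manual five-bucket `counts` bookkeeping above can be done with pd.cut and
# value_counts. A minimal sketch under the same 20%-wide buckets (the last bucket
# is left open-ended so that exactly 100% still lands in '80-\n100%'):
def overlap_bucket_counts(n_overlaps, n_clusters, ranges):
    '''Count clusters per overlap-percentage bucket, ordered as in `ranges`.'''
    pct = (n_overlaps / n_clusters) * 100
    cats = pd.cut(pct, bins=[0, 20, 40, 60, 80, np.inf], labels=ranges, right=False)
    return cats.value_counts().reindex(ranges, fill_value=0)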
def plot_time_spans_by_application(path_to_cluster_info, save_dir):
'''
Violin plots of cluster time spans per application, showing the temporal
behavior of runs in the clusters.
Parameters
----------
path_to_cluster_info: string
Path to csv file with clustering temporal run information.
save_dir: string
Directory in which the figure is saved.
Returns
-------
None
'''
df = pd.read_csv(path_to_cluster_info, index_col=0)
fig, ax = plt.subplots(1, 1, sharey=True, figsize=(5, 2))
fig.subplots_adjust(left=0.18, right=0.99, top=.95, bottom=0.28, wspace=0.12)
order = ['pw.x_416364','pw.x_415566','mosst_dynamo.x_410575','vasp_gam_406746']
labels = ['(a)','(b)','(c)','(d)']
df['Time Span'] = df['Total Time']/86400
df['Time Span'] = np.log10(df['Time Span'])
sns.violinplot(data=df, x='Application', y='Time Span', ax=ax, order=order, inner='quartile', linewidth=2, hue='Operation', split=True, palette={'Read':'skyblue', 'Write':'maroon'})
# Labels for axes
ax.set_xticklabels(labels)
ax.set_xlabel('')
ax.set_ylabel('')
fig.text(0.59, 0.05, 'Application', ha='center', va='center')
fig.text(0.003, 0.65, 'Cluster Time\nSpan (Days)', rotation=90, va='center', multialignment='center')
positions = [-2, 0, 2, 4]
labels = ['$10^{-2}$', '$10^{0}$', '$10^{2}$', '$10^4$']
ax.yaxis.set_major_locator(ticker.FixedLocator(positions))
ax.yaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x)-3 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)-2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)-1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+3 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_yticks(f_ticks, minor=True)
# Add Major ticks
ax.set_ylim(-3,4)
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
ax.get_legend().remove()
ax.legend(loc='lower left', fancybox=True, fontsize=10)
print(len(ax.lines))
for i, l in enumerate(ax.lines):
if(i%6==0):
l.set_linestyle('--')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
elif(i%6==1):
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
elif(i%6==2):
l.set_linestyle('--')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
elif(i%6==3):
l.set_linestyle('--')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
elif(i%6==4):
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
elif(i%6==5):
l.set_linestyle('--')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
plt.savefig(join(save_dir, 'cluster_time_spans_by_application.pdf'))
plt.clf()
plt.close()
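# The i % 6 styling of the split-violin quartile lines above is repeated verbatim in
# the next plotting function. A minimal sketch of a shared helper (hypothetical name)
# that applies the same dash/colour pattern:
def style_quartile_lines(ax):
    '''Style split-violin quartile lines: dashed/solid/dashed, black for the left half, white for the right.'''
    styles = ['--', '-', '--', '--', '-', '--']
    colors = ['black', 'black', 'black', 'white', 'white', 'white']
    for i, line in enumerate(ax.lines):
        line.set_linestyle(styles[i % 6])
        line.set_linewidth(1.2)
        line.set_color(colors[i % 6])
        line.set_alpha(0.8)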
def plot_interarrival_times_by_application(path_to_data, save_dir):
'''
Plot violinplots showing the inter-arrival times of runs within each application.
'''
df = pd.read_csv(path_to_data, index_col=0)
fig, ax = plt.subplots(1, 1, sharey=True, figsize=(5, 2))
fig.subplots_adjust(left=0.18, right=0.99, top=.95, bottom=0.28, wspace=0.12)
#df['Inter-Arrival Time (Hours)'] = np.log10(df['Inter-Arrival Time (Hours)'])
order = ['pw.x_416364','pw.x_415566','mosst_dynamo.x_410575','vasp_gam_406746']
labels = ['(a)','(b)','(c)','(d)']
df['Inter-Arrival Time (Hours)'] = np.log10(df['Inter-Arrival Time (Hours)'])
sns.violinplot(data=df, x='Application', y='Inter-Arrival Time (Hours)', ax=ax, order=order, inner='quartile', linewidth=2, hue='Operation', split=True, palette={'Read':'skyblue', 'Write':'maroon'})
# Labels for axes
ax.set_xticklabels(labels)
ax.set_xlabel('')
ax.set_ylabel('')
fig.text(0.58, 0.05, 'Application', ha='center', va='center')
fig.text(0.003, 0.625, 'Inter-Arrival\nTimes (Hours)', rotation=90, va='center', multialignment='center')
positions = [-4, -2, 0, 2, 4]
labels = ['$10^{-4}$', '$10^{-2}$', '$10^{0}$', '$10^{2}$', '$10^{4}$']
ax.yaxis.set_major_locator(ticker.FixedLocator(positions))
ax.yaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x)-5 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)-4 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)-3 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)-2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)-1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+3 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_yticks(f_ticks, minor=True)
# Add Major ticks
ax.set_ylim(-4,4)
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
ax.get_legend().remove()
ax.legend(loc='upper right', fancybox=True)
print(len(ax.lines))
for i, l in enumerate(ax.lines):
if(i%6==0):
l.set_linestyle('--')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
elif(i%6==1):
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
elif(i%6==2):
l.set_linestyle('--')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
elif(i%6==3):
l.set_linestyle('--')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
elif(i%6==4):
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
elif(i%6==5):
l.set_linestyle('--')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
plt.savefig(join(save_dir, 'interarrival_times_by_application.pdf'))
plt.clf()
plt.close()
def plot_cluster_cmp_perf(path_to_data):
df = pd.read_csv(path_to_data, index_col=0)
fig, axes = plt.subplots(2, 1, figsize=[5, 2.2])
fig.subplots_adjust(left=0.12, right=0.70, top=.91, bottom=0.30, wspace=0.40, hspace=0.40)
results = pd.DataFrame()
E = 2
labels = ['I/O\nAmount','Shared\nFiles','Unique\nFiles']#,'1g+','100m-\n1g','10m-\n100m','4m-\n10m','1m-\n4m','100k-\n1m','10k-\n100k','1k-\n10k','100-\n1k','0-\n100']
features = ['I/O Amount (bytes)','Number of Shared Files','Number of Unique Files']#,'1g+','100m-1g','10m-100m','4m-10m','1m-4m','100k-1m','10k-100k','1k-10k','100-1k','0-100']
# Read
operation = 'Read'
results = pd.DataFrame()
op_df = df[df['Operation']==operation]
# Get the lowest and highest 10% of clusters by performance CoV
E_25 = int(op_df.shape[0]*0.10)
l = op_df.nsmallest(E_25, ['Performance CoV'])
l['Performance CoV Percentile'] = 25
h = op_df.nlargest(E_25, ['Performance CoV'])
h['Performance CoV Percentile'] = 75
# Clean the data from outliers
#feature_max = []
for feature in features:
n = '%s Z-Score'%feature
l[n] = np.abs(stats.zscore(l[feature]))
h[n] = np.abs(stats.zscore(h[feature]))
op_df = h.append(l, ignore_index=True)
for i, row in op_df.iterrows():
for feature in features:
n = '%s Z-Score'%feature
if(row[n]>E):
continue
tmp = op_df[op_df[n]<E]
try:
value = row[feature]/tmp[feature].max()
except ZeroDivisionError:
value = 0
if(row['Performance CoV Percentile']==25):
p = 'Bottom 10%'
else:
p = 'Top 10%'
d = {'Feature': feature, 'Value': value, 'Performance CoV Percentile': p}
results = results.append(d, ignore_index=True)
PROPS = {'boxprops':{'edgecolor':'black'}, 'medianprops':{'color':'black'},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[0], data=results, x='Feature', y='Value', hue='Performance CoV Percentile', palette='Blues', order=features, **PROPS)
# Write
operation = 'Write'
results = pd.DataFrame()
op_df = df[df['Operation']==operation]
# Get the lowest and highest 10% of clusters by performance CoV
# (0.10 matches the Read branch above and the 'Top 10%'/'Bottom 10%' labels below)
E_25 = int(op_df.shape[0]*0.10)
l = op_df.nsmallest(E_25, ['Performance CoV'])
l['Performance CoV Percentile'] = 25
h = op_df.nlargest(E_25, ['Performance CoV'])
h['Performance CoV Percentile'] = 75
# Clean the data from outliers
for feature in features:
n = '%s Z-Score'%feature
l[n] = np.abs(stats.zscore(l[feature]))
h[n] = np.abs(stats.zscore(h[feature]))
op_df = h.append(l, ignore_index=True)
for i, row in op_df.iterrows():
for feature in features:
n = '%s Z-Score'%feature
if(row[n]>E):
continue
tmp = op_df[op_df[n]<E]
try:
value = row[feature]/tmp[feature].max()
except ZeroDivisionError:
value = 0
if(row['Performance CoV Percentile']==25):
p = 'Bottom 10%'
else:
p = 'Top 10%'
d = {'Feature': feature, 'Value': value, 'Performance CoV Percentile': p}
results = results.append(d, ignore_index=True)
print(results)
PROPS = {'boxprops':{'edgecolor':'black'}, 'medianprops':{'color':'black'},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[1], data=results, x='Feature', y='Value', hue='Performance CoV Percentile', palette='Reds', order=features, **PROPS)
axes[0].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
axes[0].tick_params(axis='x', labelrotation = 0)
axes[1].tick_params(axis='x', labelrotation = 0)
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].set_axisbelow(True)
axes[0].set_ylim(0,1.01)
axes[1].set_ylim(0,1.01)
axes[0].set_yticks([0,0.5,1.0])
axes[1].set_yticks([0,0.5,1.0])
fig.text(0.42, 0.002, 'Cluster Feature', ha='center')
fig.text(0.002, 0.58, "Norm'd Value", rotation=90, va='center')
fig.text(0.30, 0.93, '(a) Read')
fig.text(0.30, 0.58, '(b) Write')
axes[0].legend(loc=(1.02,0.00),fontsize=10, title='Perf CoV Percentile', title_fontsize=10)
axes[1].legend(loc=(1.02,0.00),fontsize=10, title='Perf CoV Percentile', title_fontsize=10)
axes[0].set_ylabel('')
axes[1].set_ylabel('')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
axes[0].set_xticklabels('')
axes[1].set_xticklabels(labels)
plt.savefig('./performance_cov_percentile_features.pdf')
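# The per-row normalisation loop above (drop |z-score| > E outliers, then divide by
# the largest surviving value) can be vectorised per feature. A minimal sketch; not a
# drop-in replacement, since the original computes z-scores separately for the low-
# and high-CoV subsets before combining them:
def normalise_features(op_df, features, E=2):
    '''Return long-form (Feature, Value, Percentile) rows normalised to the non-outlier maximum.'''
    frames = []
    for feature in features:
        keep = op_df[np.abs(stats.zscore(op_df[feature])) <= E]
        frames.append(pd.DataFrame({
            'Feature': feature,
            'Value': keep[feature] / keep[feature].max(),
            'Performance CoV Percentile': keep['Performance CoV Percentile'],
        }))
    return pd.concat(frames, ignore_index=True)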
def run_spread_temporally_examples(path_to_cluster_info, save_path):
'''
Identifies and gives examples for the temporal burstiness of clusters.
Parameters
----------
path_to_cluster_info: string
Path to csv file with clustering information.
Returns
-------
results: Pandas.DataFrame
Temporal information on the runs within the clusters.
'''
cluster_info = pd.read_csv(path_to_cluster_info, index_col=0)
read_clusters = cluster_info[cluster_info['Operation']=='Read']
write_clusters = cluster_info[cluster_info['Operation']=='Write']
read_applications = read_clusters['Application'].unique().tolist()
write_applications = write_clusters['Application'].unique().tolist()
results = pd.DataFrame(columns=['Cluster Number', 'Times', 'Total Time', 'Inter-Arrival Times CoV'])
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if (dtype1 != dtype2 and "datetime" in dtype1) or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() casts nan to 0 in non-float numerical columns,
    # so upcast the numerical columns to float and restore nan before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's str hash is randomized per process, which can sometimes make
    # enc_with_name_arr and enc_arr identical, and there is no better way to
    # force hash() to return the same value. Use an integer name so hash()
    # gives back a constant value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
    pds = pd.Series([1, 2, 3], name="asdf")
import pandas as pd
import numpy as np
import multiprocessing as mp
import pyteomics.mass
from ..constants import MAX_ION, ION_TYPES, MAX_FRAG_CHARGE
from .. import utils
aa_comp = dict(pyteomics.mass.std_aa_comp)
aa_comp["o"] = pyteomics.mass.Composition({"O": 1})
translate2spectronaut = {"C": "C[Carbamidomethyl (C)]", "M(ox)": "M[Oxidation (M)]"}
shape = [MAX_ION, len(ION_TYPES), MAX_FRAG_CHARGE]
FragmentNumber = np.zeros(shape, dtype=int)
FragmentType = np.zeros(shape, dtype="object")
FragmentCharge = np.zeros(shape, dtype=int)
for z in range(MAX_FRAG_CHARGE):
for j in range(MAX_ION):
for tyi, ty in enumerate(ION_TYPES):
FragmentNumber[j, tyi, z] = j + 1
FragmentType[j, tyi, z] = ty
FragmentCharge[j, tyi, z] = z + 1
FragmentNumber = FragmentNumber.flatten()
FragmentType = FragmentType.flatten()
FragmentCharge = FragmentCharge.flatten()
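# A brief sanity check on the flattened annotation layout (hedged: the concrete
# sizes come from the constants module; if MAX_ION == 29, len(ION_TYPES) == 2
# and MAX_FRAG_CHARGE == 3, the flattened length is 29 * 2 * 3 = 174, matching
# the range(174) index used inside convert_multiple_spectra below).
assert FragmentNumber.shape == FragmentType.shape == FragmentCharge.shape
assert len(FragmentNumber) == MAX_ION * len(ION_TYPES) * MAX_FRAG_CHARGE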
def convert_multiple_spectra(data):
# modified from "convert_spectrum" by <NAME> to simplify getting a dataframe from the prediction.
out_df = pd.DataFrame()
for i in range(data['intensities_pred'].shape[0]):
df = pd.DataFrame(
{
"RelativeIntensity": data["intensities_pred"][i, :],
"FragmentMz": data["masses_pred"][i, :],
"idx": list(range(174)),
}
)
spectrum = df[df.RelativeIntensity > 0].reset_index(drop=True)
idx = list(spectrum.idx)
sequence = utils.get_sequence(data["sequence_integer"][i])
charge = int(data["precursor_charge_onehot"][i].argmax() + 1)
irt = float(data["iRT"][i])
precursor_mz = pyteomics.mass.calculate_mass(
sequence=sequence.replace("M(ox)", "oM"), charge=charge, aa_comp=aa_comp
)
spectrum["ModifiedPeptide"] = sequence
spectrum["LabeledPeptide"] = sequence
spectrum["StrippedPeptide"] = spectrum.LabeledPeptide.map(
lambda p: p.replace("M(ox)", "M")
)
spectrum["PrecursorCharge"] = charge
spectrum["PrecursorMz"] = precursor_mz
spectrum["iRT"] = irt
spectrum["FragmentNumber"] = FragmentNumber[idx]
spectrum["FragmentType"] = FragmentType[idx]
spectrum["FragmentCharge"] = FragmentCharge[idx]
spectrum["FragmentLossType"] = "noloss"
for source, target in translate2spectronaut.items():
spectrum["ModifiedPeptide"] = spectrum.ModifiedPeptide.map(
lambda s: s.replace(source, target)
)
spectrum["ModifiedPeptide"] = spectrum.ModifiedPeptide.map(lambda s: "_" + s + "_")
del spectrum["idx"]
        out_df = pd.concat([out_df, spectrum], ignore_index=True)
    return out_df
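# A minimal usage sketch (hedged; the variable names, shapes and output path
# below are illustrative, not defined in this module). `data` is the dict of
# prediction arrays read by convert_multiple_spectra:
#
#   data = {
#       "intensities_pred": intensities,        # shape (n_peptides, 174)
#       "masses_pred": masses,                  # shape (n_peptides, 174)
#       "sequence_integer": sequences,          # integer-encoded peptides
#       "precursor_charge_onehot": charges,     # one-hot precursor charges
#       "iRT": irts,                            # predicted retention times
#   }
#   spectra_df = convert_multiple_spectra(data)
#   spectra_df.to_csv("predicted_library.csv", index=False)  # hypothetical path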
import warnings
import pydot
import graphviz
# Take a look at the raw data :
import pandas as pd
from pandas import Series
from pandas import DataFrame
from pandas import read_csv
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
import matplotlib
# be able to save images on server
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from math import sqrt
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/fchollet/keras/issues/2280#issuecomment-306959926
import os
import sys
import errno
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
import keras
from keras.layers import Input, Convolution1D, Dense, MaxPooling1D, Flatten, Conv2D
from keras.layers import LSTM
from keras.callbacks import Callback
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
# be able to save images on server
# matplotlib.use('Agg')
import time
import datetime
from keras.models import load_model
import multiprocessing
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Hide messy TensorFlow warnings
warnings.filterwarnings("ignore") # Hide messy Numpy warnings
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
class EarlyStoppingByLossVal(Callback):
def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
        super(EarlyStoppingByLossVal, self).__init__()
self.monitor = monitor
self.value = value
self.verbose = verbose
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" %
                          self.monitor, RuntimeWarning)
            return
if current < self.value:
if self.verbose > 0:
print("Epoch %05d: early stopping THR" % epoch)
self.model.stop_training = True
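# A hedged usage sketch for the callback above (model and data names are
# placeholders, not variables defined in this script):
#
#   early_stop = EarlyStoppingByLossVal(monitor='val_loss', value=0.01, verbose=1)
#   model.fit(x_train, y_train,
#             validation_data=(x_val, y_val),
#             epochs=100,
#             callbacks=[early_stop])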
class RData:
def __init__(self, path, n_weeks=26):
self.path = path
self.data = {}
# load dataset
self.data['raw'] = self.load_data()
# config
self.n_weeks = n_weeks
self.n_features = int(len(self.data['raw'][0].columns))
print("number of features: {}".format(self.n_features))
# scale data
self.scaler = preprocessing.MinMaxScaler()
self.scale()
# reframe data
self.reframe()
# self.state_list_name = self.data.state.unique()
self.split_data()
# print(self.n_features)
# Return specific data
def __getitem__(self, index):
return self.data[index]
# load dataset
def load_data(self):
raw = read_csv(self.path)
raw = raw.fillna(0)
# print(raw['0'].head())
# raw = raw.drop(["0"], axis = 1)
# print(raw.head())
# transform column names
raw.columns = map(str.lower, raw.columns)
# raw.rename(columns={'weekend': 'date'}, inplace=True)
latitudeList = raw.latitude.unique()
longitudeList = raw.longitude.unique()
data_list = list()
cell_label = list()
for la in latitudeList:
for lo in longitudeList:
data = raw[(raw.latitude == la) & (raw.longitude == lo)]
if(len(data) == 260):
select = [
#'date',
#'year',
#'month',
#'week',
#'week_temp',
#'week_prcp',
#'latitude',
#'longitude',
'mean_ili',
#'ili_activity_label',
#'ili_activity_group'
]
# One Hot Encoding
data = pd.get_dummies(data[select])
# print(data.head(1))
data_list.append(data)
cell_label.append('lat {} - long {}'.format(la, lo))
#print("The data for latitude {} and longitude {} contains {} rows".format(
# la, lo, len(data)))
self.data['cell_labels'] = cell_label
print("The are {} cell in the data".format(len(data_list)))
return data_list
# convert series to supervised learning
@staticmethod
def series_to_supervised(df, n_in=26, n_out=26, dropnan=True):
from pandas import concat
data = DataFrame(df)
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
input_list, target_list = list(), list()
input_names, target_names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
input_list.append(df.shift(i))
input_names += [('var%d(t-%d)' % (j + 1, i))
for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
target_list.append(df.shift(-i))
if i == 0:
target_names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
target_names += [('var%d(t+%d)' % (j + 1, i))
for j in range(n_vars)]
# put it all together
samples = concat(input_list, axis=1)
samples.columns = input_names
targets = concat(target_list, axis=1)
targets.columns = target_names
# drop rows with NaN values
if dropnan:
targets.fillna(-1, inplace=True)
samples.fillna(-1, inplace=True)
supervised = [samples, targets]
return supervised
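    # A hedged illustration of series_to_supervised on a single feature with
    # n_in=2, n_out=2 (column names follow the var%d(t-%d)/var%d(t+%d) pattern
    # built above; NaNs introduced by shifting are filled with -1):
    #
    #   df = DataFrame({'x': [1, 2, 3, 4]})
    #   samples, targets = RData.series_to_supervised(df, n_in=2, n_out=2)
    #   # samples columns: var1(t-2), var1(t-1)
    #   # targets columns: var1(t), var1(t+1)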
# convert series to supervised learning
@staticmethod
def series_to_reframed(data, n_in=26, n_out=26, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.fillna(-1, inplace=True)
return agg
# frame a sequence as a supervised learning problem
@staticmethod
def _series_to_supervised(data, lag=26):
from pandas import concat
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag + 1)]
columns.append(df)
        df = concat(columns, axis=1)
        return df
#!/usr/bin/env python
# -*- coding: utf-8 -*
from __future__ import unicode_literals
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
"""@Reference: https://github.com/5hirish/adam_qas/blob/master/qas/classifier/question_classifier.py"""
#import spacy
#import csv
import logging
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.externals import joblib
from scipy.sparse import csr_matrix
import pandas as pd
import numpy as np
from sklearn import metrics
import os
logger = logging.getLogger(__name__)
#Constants
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TRAINING_DATA = os.path.join(os.path.dirname(__file__), 'data')
OUTPUT_DIR = os.path.join(os.path.dirname(__file__), 'output')
QUESTION_CLASSIFIER_TRAINING_DATA = "qclassifier_trainer.csv"
QUESTION_CLASSIFICATION_RAW_DATA = "qclassification_data.txt"
#generated model
QUESTION_CLASSIFICATION_MODEL = "questionclassifier.pkl"
EXAMPLE_QUESTION = [
"What is the value of sensor1 in machine1?",
"Give me the members of linkedfactory",
"What does linkedfactory contains?",
"What contains IWU?"
]
EN_MODEL_DEFAULT = "en"
EN_MODEL_SM = "en_core_web_sm"
EN_MODEL_MD = "en_core_web_md"
#EN_MODEL_LG = "en_core_web_lg"
#You can use with a model or a function
class SVMClassifier():
def pre_process(self, dta):
return pd.get_dummies(dta)
def feature_engineering(self, question):
question_class = question.pop('Class')
question.pop('Question')
question.pop('WH-Bigram')
return question_class
def transform_data_matrix(self, question_train, question_predict):
        # collect the feature columns of each set
        question_train_columns = list(question_train.columns)
        print("number of training features:", len(question_train_columns))
        question_predict_columns = list(question_predict.columns)
        # take the union of both column sets, de-duplicated via set()
        question_trans_columns = list(set(question_train_columns + question_predict_columns))
logger.debug("Union Columns: {0}".format(len(question_trans_columns)))
trans_data_train = {}
for feature in question_trans_columns:
if feature not in question_train:
trans_data_train[feature] = [0 for i in range(len(question_train.index))]
else:
trans_data_train[feature] = list(question_train[feature])
        question_train = pd.DataFrame(trans_data_train)
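        # Hedged alternative (not the author's code): the column-alignment loop
        # above could also be expressed with pandas.DataFrame.reindex, which
        # fills one-hot columns missing from either frame with zeros:
        #
        #   all_columns = sorted(set(question_train.columns) | set(question_predict.columns))
        #   question_train = question_train.reindex(columns=all_columns, fill_value=0)
        #   question_predict = question_predict.reindex(columns=all_columns, fill_value=0)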
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations: verify the exact size, fee and cash arithmetic under fees, fixed fees and slippage
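# The expected numbers below can be reproduced by hand (a sketch of the arithmetic, not the engine's code):
# buying with fees=0.1, fixed_fees=1, slippage=0.1 and order price 10 from 100 in cash:
#   fill price      = 10 * (1 + 0.1) = 11
#   affordable size = (100 - 1) / (11 * (1 + 0.1)) = 99 / 12.1 = 8.1818...
#   fees paid       = 8.1818... * 11 * 0.1 + 1 = 10, so the full 100 in cash is spent
# selling 10 with the same costs:
#   fill price = 10 * (1 - 0.1) = 9, gross = 90, fees = 9 + 1 = 10, net proceeds = 80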
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
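# SizeType.Percent below appears to interpret the size as a fraction of the maximum amount that
# could be traded in that direction (an interpretation inferred from the expected values): with
# cash=50, position=5 and price=10, a sell of -1.0 fills 15 shares (close the 5 held, then short
# 10 more against the resulting 100 in cash), so -0.5 fills 7.5.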
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
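# lock_cash cases: with lock_cash=True the order size is capped by free cash rather than total
# cash (100 in cash but only 50 free -> only 5 shares bought below), and a sell that would add
# new short exposure while free cash is already negative is rejected (status=2, status_info=6).
# Interpretation inferred from the expected values, not a definitive statement of the internals.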
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
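# Size conventions used throughout: np.inf means "buy/long with everything available",
# -np.inf means "sell/short with everything available", and np.nan skips the bar.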
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
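# Each order record is (id, col, idx, size, price, fees, side) per order_dt,
# where side 0 is a buy and side 1 is a sell.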
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
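# The price=np.inf cases above match the default close-price fills, while price=-np.inf is
# consistent with filling at the previous bar's close (orders land one bar later, e.g. at
# price 1.0 on index 1 and 3.0 on index 3).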
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
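# val_price=np.inf values the portfolio at the order price (here the close), and val_price=-np.inf
# at the previous valuation price; with ffill_val_price=False the NaN close is not forward-filled,
# which is why the reference series switch from price / ffill().shift(1) to price_nan / shift(1).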
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
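# Slippage moves the fill price against the trader: buys fill at price * (1 + slippage) and
# sells at price * (1 - slippage), so slippage=1.0 doubles buy prices and zeroes sell prices.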
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
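# Each log row (log_dt) captures the full context of one order call: roughly the state before
# the call, the resolved order parameters, the state after, and the order result; the expected
# rows above spell that layout out verbatim.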
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
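# call_seq='auto' reorders the calls inside each group per bar so that columns being reduced are
# processed before columns being increased, letting the freed cash fund the buys within the same
# bar; the call_seq matrices checked below reflect that ordering.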
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
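# With update_value=True the group value is recomputed after every filled order within a bar, so
# later columns in the call sequence size their target-percent orders against the updated value.
# For a single column the two settings coincide (the comparison above); in the grouped runs below,
# column 2 receives orders only when update_value=True.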
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
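# With cash sharing, size_type='percent' is applied to the cash remaining in the shared pool as
# each column is processed, hence 50, 25 and 12.5 at the first bar.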
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
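# The wide from_orders run issues exactly 9 orders (3 per column, see test_multiple_columns) and
# 15 log rows (one per bar per column), so caps of 9/15 fit while 8/14 overflow and raise.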
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
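# These helpers exercise the long/short signal form of from_signals, where the long and short legs
# are passed as separate signal arrays (presumably entries, exits, short_entries, short_exits in
# that positional order): 'both' routes exits to the short-entry slot so an exit reverses the
# position, 'longonly' passes no short signals, and 'shortonly' feeds the arrays to the short leg
# only. The parametrized tests below assert these match the direction presets.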
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
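    # The next test builds signals through a custom numba callback instead of boolean
    # arrays: signal_func_nb returns (long_entry, long_exit, short_entry, short_exit)
    # per element, derived here from the signs of the broadcast long/short number
    # arrays, and the result is expected to match the equivalent array-based pf_base.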
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
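    # In the parameter tests below, a nested list such as [[0, 1, np.inf]] is treated
    # as a single row and broadcast along columns, so each inner element becomes its
    # own test column; this is how one call sweeps several values of size, fees,
    # slippage, min_size, etc. at once.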
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
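    # val_price is the valuation price used for value-based sizing. The assertions
    # below encode its special values: np.inf behaves like passing the order/close
    # price itself and -np.inf like the previous (forward-filled) close, while
    # ffill_val_price=False disables the forward fill so NaN valuation prices stay NaN.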
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
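    # accumulate controls how repeated signals affect an open position; the four test
    # columns cover 'disabled' (further entries are ignored), 'addonly', 'removeonly'
    # and 'both' (allow both adding to and reducing the position).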
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
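    # The next three tests resolve simultaneous signals: upon_long_conflict and
    # upon_short_conflict decide between an entry and an exit arriving on the same bar,
    # and upon_dir_conflict decides between a long and a short entry; each column
    # exercises one of the modes listed in the corresponding kwargs.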
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
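    # call_seq defines the per-row order in which columns of a cash-sharing group are
    # processed: the default goes left to right, 'reversed' right to left, 'random'
    # shuffles with the given seed, and 'auto' reorders calls so that sell orders free
    # up cash before buy orders execute, which the rotation example below relies on.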
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
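    # The stop-loss / trailing-stop / take-profit tests use simple falling and rising
    # close series with open/high/low offsets of +/- 0.25 and +/- 0.5, and sweep the
    # stop threshold per column; np.nan disables the stop and np.inf can never be hit,
    # so those columns are expected to contain only the entry order.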
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
            _ = from_signals_both(sl_stop=-0.1, sl_trail=True)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
            _ = from_signals_both(tp_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
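    # stop_entry_price selects the reference price from which the stop level is
    # measured ('val_price', 'price', 'fillprice' or 'close' below), while
    # stop_exit_price selects how the stop exit is filled: per the expected records,
    # 'stoplimit' fills exactly at the stop level, 'stopmarket' additionally applies
    # slippage, and 'close'/'price' fill at the bar close or the user-supplied price
    # (both with slippage).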
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
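    # adjust_sl_func_nb / adjust_tp_func_nb are numba callbacks that may rewrite the
    # active stop on each bar: here they drop it to 0% once the position has been open
    # for `dur` bars, forcing an exit. The SL variant returns a (stop, trail) pair and
    # the TP variant only the stop; the trailing test measures duration from c.curr_i
    # rather than c.init_i.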
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.curr_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 10.0, 10.0, 0.0, 0), (1, 0, 4, 10.0, 10.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_tp_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
@njit
def adjust_tp_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=np.inf, adjust_tp_func_nb=adjust_tp_func_nb, adjust_tp_args=(2,)).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_max_orders(self):
_ = from_signals_both(close=price_wide)
_ = from_signals_both(close=price_wide, max_orders=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_both(close=price_wide, log=True)
_ = from_signals_both(close=price_wide, log=True, max_logs=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
class TestFromRandomSignals:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='randnx_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples(
[(0.25, 0.25), (0.5, 0.5)],
names=['rprobnx_entry_prob', 'rprobnx_exit_prob'])
)
# ############# from_order_func ############# #
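# Module-level order functions shared by TestFromOrderFunc. The plain variants return
# a single order for the current column, buying on even bars and selling on odd bars;
# the log variants additionally enable order logging. The flexible variants are called
# repeatedly per segment and return a (column, order) tuple, terminating the segment
# with (-1, order_nothing_nb()).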
@njit
def order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size)
@njit
def log_order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
@njit
def flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size)
return -1, nb.order_nothing_nb()
@njit
def log_flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
return -1, nb.order_nothing_nb()
class TestFromOrderFunc:
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_one_column(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price.tolist(), order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price, order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
@pytest.mark.parametrize("test_use_numba", [False, True])
def test_multiple_columns(self, test_row_wise, test_flexible, test_use_numba):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, vbt.Rep('size'), broadcast_named_args=dict(size=[0, 1, np.inf]),
row_wise=test_row_wise, flexible=test_flexible, use_numba=test_use_numba)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 2, 0, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 2, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 1.0, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 1, 4, 1.0, 5.0, 0.0, 0), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 1, 1.0, 2.0, 0.0, 1),
(2, 1, 2, 1.0, 3.0, 0.0, 0), (3, 1, 3, 1.0, 4.0, 0.0, 1),
(4, 1, 4, 1.0, 5.0, 0.0, 0), (5, 2, 0, 100.0, 1.0, 0.0, 0),
(6, 2, 1, 200.0, 2.0, 0.0, 1), (7, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 3, 66.66666666666669, 4.0, 0.0, 1), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_group_by(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 0, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 2, 1, 200.0, 2.0, 0.0, 1),
(6, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(10, 1, 3, 66.66666666666669, 4.0, 0.0, 1), (11, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(12, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (13, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 0, 1, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (5, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 0, 3, 66.66666666666669, 4.0, 0.0, 1), (7, 1, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (9, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(10, 2, 0, 100.0, 1.0, 0.0, 0), (11, 2, 1, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_cash_sharing(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
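# call_seq='auto' is rejected by from_order_func (asserted above), so the automatic
# sorting is reproduced manually: the pre-segment callback below sorts the call
# sequence by target order value via nb.sort_call_seq_nb before any order is executed.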
@njit
def pre_segment_func_nb(c, target_hold_value):
order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
order_size_type = np.full(c.group_len, SizeType.TargetValue)
direction = np.full(c.group_len, Direction.Both)
order_value_out = np.empty(c.group_len, dtype=np.float_)
c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(c, order_size, order_size_type, direction):
col_i = c.call_seq_now[c.call_idx]
return nb.order_nb(
order_size[col_i],
c.close[c.i, col_i],
size_type=order_size_type[col_i],
direction=direction[col_i]
)
pf = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_value(self, test_row_wise, test_flexible):
@njit
def target_val_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
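# The optional pre-segment callback overrides last_val_price with a shifted price
# array; with it in place, an order is also filled on the very first bar
# (compare the two expected record arrays below).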
if test_flexible:
@njit
def target_val_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(50., nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetValue)
return -1, nb.order_nothing_nb()
else:
@njit
def target_val_order_func_nb(c):
return nb.order_nb(50., nb.get_elem_nb(c, c.close), size_type=SizeType.TargetValue)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
pre_segment_func_nb=target_val_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_percent(self, test_row_wise, test_flexible):
@njit
def target_pct_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_pct_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(0.5, nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetPercent)
return -1, nb.order_nothing_nb()
else:
@njit
def target_pct_order_func_nb(c):
return nb.order_nb(0.5, nb.get_elem_nb(c, c.close), size_type=SizeType.TargetPercent)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
pre_segment_func_nb=target_pct_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_update_value(self, test_row_wise, test_flexible):
if test_flexible:
@njit
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
@njit
def order_func_nb(c):
return nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
@njit
def post_order_func_nb(c, value_before, value_now):
value_before[c.i, c.col] = c.value_before
value_now[c.i, c.col] = c.value_now
value_before = np.empty_like(price.values[:, None])
value_now = np.empty_like(price.values[:, None])
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=False,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
value_now
)
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=True,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
np.array([
[100.0],
[97.04930889128518],
[185.46988117104038],
[82.47853456223025],
[104.65775576218027]
])
)
np.testing.assert_array_equal(
value_now,
np.array([
[98.01980198019803],
[187.36243097890815],
[83.30331990785257],
[105.72569204546781],
[73.54075125567473]
])
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_states(self, test_row_wise, test_flexible):
close = np.array([
[1, 1, 1],
[np.nan, 2, 2],
[3, np.nan, 3],
[4, 4, np.nan],
[5, 5, 5]
])
size = np.array([
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1]
])
value_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
value_arr2 = np.empty(size.shape, dtype=np.float_)
value_arr3 = np.empty(size.shape, dtype=np.float_)
return_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr2 = np.empty(size.shape, dtype=np.float_)
return_arr3 = np.empty(size.shape, dtype=np.float_)
pos_record_arr1 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr2 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr3 = np.empty(size.shape, dtype=trade_dt)
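# Three snapshots of value, return, and position records are taken per bar: in the
# pre-segment callback (per group), in the order function, and in the post-order
# callback. Each snapshot is then compared against hard-coded expected state arrays.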
def pre_segment_func_nb(c):
value_arr1[c.i, c.group] = c.last_value[c.group]
return_arr1[c.i, c.group] = c.last_return[c.group]
for col in range(c.from_col, c.to_col):
pos_record_arr1[c.i, col] = c.last_pos_record[col]
if c.i > 0:
c.last_val_price[c.from_col:c.to_col] = c.last_val_price[c.from_col:c.to_col] + 0.5
return ()
if test_flexible:
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
value_arr2[c.i, col] = c.last_value[c.group]
return_arr2[c.i, col] = c.last_return[c.group]
pos_record_arr2[c.i, col] = c.last_pos_record[col]
return col, nb.order_nb(size[c.i, col], fixed_fees=1.)
return -1, nb.order_nothing_nb()
else:
def order_func_nb(c):
value_arr2[c.i, c.col] = c.value_now
return_arr2[c.i, c.col] = c.return_now
pos_record_arr2[c.i, c.col] = c.pos_record_now
return nb.order_nb(size[c.i, c.col], fixed_fees=1.)
def post_order_func_nb(c):
value_arr3[c.i, c.col] = c.value_now
return_arr3[c.i, c.col] = c.return_now
pos_record_arr3[c.i, c.col] = c.pos_record_now
_ = vbt.Portfolio.from_order_func(
close,
order_func_nb,
pre_segment_func_nb=pre_segment_func_nb,
post_order_func_nb=post_order_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
value_arr1,
np.array([
[100.0, 100.0],
[98.0, 99.0],
[98.5, 99.0],
[99.0, 98.0],
[99.0, 98.5]
])
)
np.testing.assert_array_equal(
value_arr2,
np.array([
[100.0, 99.0, 100.0],
[99.0, 99.0, 99.5],
[99.0, 99.0, 99.0],
[100.0, 100.0, 98.5],
[99.0, 98.5, 99.0]
])
)
np.testing.assert_array_equal(
value_arr3,
np.array([
[99.0, 98.0, 99.0],
[99.0, 98.5, 99.0],
[99.0, 99.0, 98.0],
[100.0, 99.0, 98.5],
[98.5, 97.0, 99.0]
])
)
np.testing.assert_array_equal(
return_arr1,
np.array([
[np.nan, np.nan],
[-0.02, -0.01],
[0.00510204081632653, 0.0],
[0.005076142131979695, -0.010101010101010102],
[0.0, 0.00510204081632653]
])
)
np.testing.assert_array_equal(
return_arr2,
np.array([
[0.0, -0.01, 0.0],
[-0.01, -0.01, -0.005],
[0.01020408163265306, 0.01020408163265306, 0.0],
[0.015228426395939087, 0.015228426395939087, -0.005050505050505051],
[0.0, -0.005050505050505051, 0.01020408163265306]
])
)
np.testing.assert_array_equal(
return_arr3,
np.array([
[-0.01, -0.02, -0.01],
[-0.01, -0.015, -0.01],
[0.01020408163265306, 0.01020408163265306, -0.010101010101010102],
[0.015228426395939087, 0.005076142131979695, -0.005050505050505051],
[-0.005050505050505051, -0.020202020202020204, 0.01020408163265306]
])
)
record_arrays_close(
pos_record_arr1.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr2.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 1.0, 0.25, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.5, 0.375, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.5, -0.375, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr3.flatten(),
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 3.0, 0, 3.0, 3.0, -1, 4.0, 1.0, 1.0, 0.1111111111111111, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, 4, 5.0, 1.0, -3.0, -0.75, 1, 1, 1),
(1, 2, 2.0, 2, 4.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
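# Second pass: capture cash, positions, val_price, value, and returns from post-order
# and post-segment callbacks and check them against the portfolio's own accessors.
# sim_order_return_arr first stores value_now and is then converted in place into a
# return relative to the previous simulation step (the previous column in the same
# bar, or the last column of the previous bar for the first call of a segment).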
cash_arr = np.empty((size.shape[0], 2), dtype=np.float_)
position_arr = np.empty(size.shape, dtype=np.float_)
val_price_arr = np.empty(size.shape, dtype=np.float_)
value_arr = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr = np.empty((size.shape[0], 2), dtype=np.float_)
sim_order_cash_arr = np.empty(size.shape, dtype=np.float_)
sim_order_value_arr = np.empty(size.shape, dtype=np.float_)
sim_order_return_arr = np.empty(size.shape, dtype=np.float_)
def post_order_func_nb(c):
sim_order_cash_arr[c.i, c.col] = c.cash_now
sim_order_value_arr[c.i, c.col] = c.value_now
sim_order_return_arr[c.i, c.col] = c.value_now
if c.i == 0 and c.call_idx == 0:
sim_order_return_arr[c.i, c.col] -= c.init_cash[c.group]
sim_order_return_arr[c.i, c.col] /= c.init_cash[c.group]
else:
if c.call_idx == 0:
prev_i = c.i - 1
prev_col = c.to_col - 1
else:
prev_i = c.i
prev_col = c.from_col + c.call_idx - 1
sim_order_return_arr[c.i, c.col] -= sim_order_value_arr[prev_i, prev_col]
sim_order_return_arr[c.i, c.col] /= sim_order_value_arr[prev_i, prev_col]
def post_segment_func_nb(c):
cash_arr[c.i, c.group] = c.last_cash[c.group]
for col in range(c.from_col, c.to_col):
position_arr[c.i, col] = c.last_position[col]
val_price_arr[c.i, col] = c.last_val_price[col]
value_arr[c.i, c.group] = c.last_value[c.group]
return_arr[c.i, c.group] = c.last_return[c.group]
pf = vbt.Portfolio.from_order_func(
close,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_segment_func_nb=post_segment_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
cash_arr,
pf.cash().values
)
np.testing.assert_array_equal(
position_arr,
pf.assets().values
)
np.testing.assert_array_equal(
val_price_arr,
pf.get_filled_close().values
)
np.testing.assert_array_equal(
value_arr,
pf.value().values
)
np.testing.assert_array_equal(
return_arr,
pf.returns().values
)
if test_flexible:
with pytest.raises(Exception):
pf.cash(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.value(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.returns(in_sim_order=True, group_by=False)
else:
np.testing.assert_array_equal(
sim_order_cash_arr,
pf.cash(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_value_arr,
pf.value(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_return_arr,
pf.returns(in_sim_order=True, group_by=False).values
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_post_sim_ctx(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
1.,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
return -1, nb.order_nothing_nb()
else:
def order_func(c):
return nb.order_nb(
1.,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
def post_sim_func(c, lst):
lst.append(deepcopy(c))
lst = []
_ = vbt.Portfolio.from_order_func(
price_wide,
order_func,
post_sim_func_nb=post_sim_func,
post_sim_args=(lst,),
row_wise=test_row_wise,
update_value=True,
max_logs=price_wide.shape[0] * price_wide.shape[1],
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
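# The simulation context captured by post_sim_func is inspected below: static inputs
# (target shape, close, group lengths, initial cash), simulation flags, the full order
# and log records, and the final per-group/per-column state arrays.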
c = lst[-1]
assert c.target_shape == price_wide.shape
np.testing.assert_array_equal(
c.close,
price_wide.values
)
np.testing.assert_array_equal(
c.group_lens,
np.array([2, 1])
)
np.testing.assert_array_equal(
c.init_cash,
np.array([100., 100.])
)
assert c.cash_sharing
if test_flexible:
assert c.call_seq is None
else:
np.testing.assert_array_equal(
c.call_seq,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
np.testing.assert_array_equal(
c.segment_mask,
np.array([
[True, True],
[True, True],
[True, True],
[True, True],
[True, True]
])
)
assert c.ffill_val_price
assert c.update_value
if test_row_wise:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 2, 0, 1.0, 1.01, 1.0101, 0), (3, 0, 1, 1.0, 2.02, 1.0202, 0),
(4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 2, 1, 1.0, 2.02, 1.0202, 0),
(6, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (7, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 0, 3, 1.0, 4.04, 1.0404, 0),
(10, 1, 3, 1.0, 4.04, 1.0404, 0), (11, 2, 3, 1.0, 4.04, 1.0404, 0),
(12, 0, 4, 1.0, 5.05, 1.0505, 0), (13, 1, 4, 1.0, 5.05, 1.0505, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 0, 1, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
(4, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (5, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(6, 0, 3, 1.0, 4.04, 1.0404, 0), (7, 1, 3, 1.0, 4.04, 1.0404, 0),
(8, 0, 4, 1.0, 5.05, 1.0505, 0), (9, 1, 4, 1.0, 5.05, 1.0505, 0),
(10, 2, 0, 1.0, 1.01, 1.0101, 0), (11, 2, 1, 1.0, 2.02, 1.0202, 0),
(12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 2, 3, 1.0, 4.04, 1.0404, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
if test_row_wise:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
(3, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
(5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
(6, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 6),
(7, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
(8, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 8),
(9, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
1.0, 4.04, 1.0404, 0, 0, -1, 9),
(10, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
(11, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
(12, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
(13, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
else:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
(3, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 4),
(5, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
(6, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
(7, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
(8, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
(9, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
(10, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
(11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
(12, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 12),
(13, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
np.testing.assert_array_equal(
c.last_cash,
np.array([59.39700000000002, 79.69850000000001])
)
np.testing.assert_array_equal(
c.last_position,
np.array([5., 5., 5.])
)
np.testing.assert_array_equal(
c.last_val_price,
np.array([5.0, 5.0, 5.0])
)
np.testing.assert_array_equal(
c.last_value,
np.array([109.39700000000002, 104.69850000000001])
)
np.testing.assert_array_equal(
c.second_last_value,
np.array([103.59800000000001, 101.799])
)
np.testing.assert_array_equal(
c.last_return,
np.array([0.05597598409235705, 0.028482598060884715])
)
np.testing.assert_array_equal(
c.last_debt,
np.array([0., 0., 0.])
)
np.testing.assert_array_equal(
c.last_free_cash,
np.array([59.39700000000002, 79.69850000000001])
)
if test_row_wise:
np.testing.assert_array_equal(
c.last_oidx,
np.array([12, 13, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([12, 13, 14])
)
else:
np.testing.assert_array_equal(
c.last_oidx,
np.array([8, 9, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([8, 9, 14])
)
assert c.order_records[c.last_oidx[0]]['col'] == 0
assert c.order_records[c.last_oidx[1]]['col'] == 1
assert c.order_records[c.last_oidx[2]]['col'] == 2
assert c.log_records[c.last_lidx[0]]['col'] == 0
assert c.log_records[c.last_lidx[1]]['col'] == 1
assert c.log_records[c.last_lidx[2]]['col'] == 2
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_free_cash(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c, size):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
size[c.i, col],
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
def order_func(c, size):
return nb.order_nb(
size[c.i, c.col],
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
def post_order_func(c, debt, free_cash):
debt[c.i, c.col] = c.debt_now
if c.cash_sharing:
free_cash[c.i, c.group] = c.free_cash_now
else:
free_cash[c.i, c.col] = c.free_cash_now
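# debt_now and free_cash_now are recorded after every order. Free cash can diverge
# from regular cash when short positions create debt, which the mixed long/short size
# matrix below exercises; the recorded values must match pf.cash(free=True).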
size = np.array([
[5, -5, 5],
[5, -5, -10],
[-5, 5, 10],
[-5, 5, -10],
[-5, 5, 10]
])
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[93.8995, 94.0005, 93.8995],
[82.6985, 83.00150000000001, 92.70150000000001],
[96.39999999999999, 81.55000000000001, 80.8985],
[115.002, 74.998, 79.5025],
[89.0045, 48.49550000000001, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 24.75, 0.0],
[0.0, 44.55, 19.8],
[0.0, 22.275, 0.0],
[0.0, 0.0, 9.9],
[4.95, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[73.4975, 74.0025, 73.4975],
[52.0955, 53.00449999999999, 72.1015],
[65.797, 81.25299999999999, 80.0985],
[74.598, 114.60199999999998, 78.9005],
[68.5985, 108.50149999999998, 87.49949999999998]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[87.9, 93.8995],
[65.70000000000002, 92.70150000000001],
[77.95000000000002, 80.8985],
[90.00000000000001, 79.5025],
[37.500000000000014, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_init_cash(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=[1., 10., np.inf], flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 1.0, 0.0, 0),
(2, 2, 0, 10.0, 1.0, 0.0, 0), (3, 0, 1, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 2, 1, 10.0, 2.0, 0.0, 1),
(6, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 0, 3, 10.0, 4.0, 0.0, 1),
(10, 1, 3, 10.0, 4.0, 0.0, 1), (11, 2, 3, 10.0, 4.0, 0.0, 1),
(12, 0, 4, 8.0, 5.0, 0.0, 0), (13, 1, 4, 8.0, 5.0, 0.0, 0),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 2.0, 0.0, 1),
(2, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (3, 0, 3, 10.0, 4.0, 0.0, 1),
(4, 0, 4, 8.0, 5.0, 0.0, 0), (5, 1, 0, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 1, 3, 10.0, 4.0, 0.0, 1), (9, 1, 4, 8.0, 5.0, 0.0, 0),
(10, 2, 0, 10.0, 1.0, 0.0, 0), (11, 2, 1, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 2, 3, 10.0, 4.0, 0.0, 1),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(pf._init_cash) == np.ndarray
base_pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=np.inf, flexible=test_flexible)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.Auto, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.Auto
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.AutoAlign, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.AutoAlign
def test_func_calls(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
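# Every callback bumps a shared counter and records its own call index, which pins the
# exact call order of the simulation. sub_arg is a template that evaluates to
# target_shape[0] * target_shape[1] == 15 and is asserted inside each callback to
# verify that template substitution reaches every function.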
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 56
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [56]
assert list(pre_group_lst) == [2, 34]
assert list(post_group_lst) == [33, 55]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 35, 39, 43, 47, 51]
assert list(post_segment_lst) == [8, 14, 20, 26, 32, 38, 42, 46, 50, 54]
assert list(order_lst) == [4, 6, 10, 12, 16, 18, 22, 24, 28, 30, 36, 40, 44, 48, 52]
assert list(post_order_lst) == [5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 37, 41, 45, 49, 53]
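# With an explicit segment mask and call_pre_segment/call_post_segment=True, inactive
# segments still trigger the segment callbacks but produce no order or post-order
# calls; with both flags set to False, the segment callbacks are skipped for inactive
# segments as well.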
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 38
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [38]
assert list(pre_group_lst) == [2, 22]
assert list(post_group_lst) == [21, 37]
assert list(pre_segment_lst) == [3, 5, 7, 13, 19, 23, 25, 29, 31, 35]
assert list(post_segment_lst) == [4, 6, 12, 18, 20, 24, 28, 30, 34, 36]
assert list(order_lst) == [8, 10, 14, 16, 26, 32]
assert list(post_order_lst) == [9, 11, 15, 17, 27, 33]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 26
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [26]
assert list(pre_group_lst) == [2, 16]
assert list(post_group_lst) == [15, 25]
assert list(pre_segment_lst) == [3, 9, 17, 21]
assert list(post_segment_lst) == [8, 14, 20, 24]
assert list(order_lst) == [4, 6, 10, 12, 18, 22]
assert list(post_order_lst) == [5, 7, 11, 13, 19, 23]
def test_func_calls_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 66
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [66]
assert list(pre_group_lst) == [2, 39]
assert list(post_group_lst) == [38, 65]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 40, 45, 50, 55, 60]
assert list(post_segment_lst) == [9, 16, 23, 30, 37, 44, 49, 54, 59, 64]
assert list(order_lst) == [
4, 6, 8, 11, 13, 15, 18, 20, 22, 25, 27, 29, 32, 34,
36, 41, 43, 46, 48, 51, 53, 56, 58, 61, 63
]
assert list(post_order_lst) == [5, 7, 12, 14, 19, 21, 26, 28, 33, 35, 42, 47, 52, 57, 62]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 42
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [42]
assert list(pre_group_lst) == [2, 24]
assert list(post_group_lst) == [23, 41]
assert list(pre_segment_lst) == [3, 5, 7, 14, 21, 25, 27, 32, 34, 39]
assert list(post_segment_lst) == [4, 6, 13, 20, 22, 26, 31, 33, 38, 40]
assert list(order_lst) == [8, 10, 12, 15, 17, 19, 28, 30, 35, 37]
assert list(post_order_lst) == [9, 11, 16, 18, 29, 36]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 30
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [30]
assert list(pre_group_lst) == [2, 18]
assert list(post_group_lst) == [17, 29]
assert list(pre_segment_lst) == [3, 10, 19, 24]
assert list(post_segment_lst) == [9, 16, 23, 28]
assert list(order_lst) == [4, 6, 8, 11, 13, 15, 20, 22, 25, 27]
assert list(post_order_lst) == [5, 7, 12, 14, 21, 26]
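# The row-wise variant replaces the group callbacks with pre_row/post_row callbacks,
# which are invoked once per row across all groups; the call-order assertions below
# mirror the grouped tests above.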
def test_func_calls_row_wise(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst):
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst):
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst):
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst):
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst):
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst):
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst):
call_i[0] += 1
post_order_lst.append(call_i[0])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 62
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [62]
assert list(pre_row_lst) == [2, 14, 26, 38, 50]
assert list(post_row_lst) == [13, 25, 37, 49, 61]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 33, 39, 45, 51, 57]
assert list(post_segment_lst) == [8, 12, 20, 24, 32, 36, 44, 48, 56, 60]
assert list(order_lst) == [4, 6, 10, 16, 18, 22, 28, 30, 34, 40, 42, 46, 52, 54, 58]
assert list(post_order_lst) == [5, 7, 11, 17, 19, 23, 29, 31, 35, 41, 43, 47, 53, 55, 59]
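        # Two more runs with a custom segment mask: first with segment callbacks
        # forced for inactive segments (call_pre_segment/call_post_segment=True),
        # then with them limited to active segments only.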
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 44
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [44]
assert list(pre_row_lst) == [2, 8, 16, 26, 38]
assert list(post_row_lst) == [7, 15, 25, 37, 43]
assert list(pre_segment_lst) == [3, 5, 9, 11, 17, 23, 27, 33, 39, 41]
assert list(post_segment_lst) == [4, 6, 10, 14, 22, 24, 32, 36, 40, 42]
assert list(order_lst) == [12, 18, 20, 28, 30, 34]
assert list(post_order_lst) == [13, 19, 21, 29, 31, 35]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 32
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [32]
assert list(pre_row_lst) == [2, 4, 10, 18, 30]
assert list(post_row_lst) == [3, 9, 17, 29, 31]
assert list(pre_segment_lst) == [5, 11, 19, 25]
assert list(post_segment_lst) == [8, 16, 24, 28]
assert list(order_lst) == [6, 12, 14, 20, 22, 26]
assert list(post_order_lst) == [7, 13, 15, 21, 23, 27]
def test_func_calls_row_wise_flexible(self):
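        # Same call-order checks with a flexible order function; sub_arg is a
        # template resolved at simulation time to np.prod(target_shape) == 15,
        # which every callback validates.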
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 72
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [72]
assert list(pre_row_lst) == [2, 16, 30, 44, 58]
assert list(post_row_lst) == [15, 29, 43, 57, 71]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 38, 45, 52, 59, 66]
assert list(post_segment_lst) == [9, 14, 23, 28, 37, 42, 51, 56, 65, 70]
assert list(order_lst) == [
4, 6, 8, 11, 13, 18, 20, 22, 25, 27, 32, 34, 36,
39, 41, 46, 48, 50, 53, 55, 60, 62, 64, 67, 69
]
assert list(post_order_lst) == [5, 7, 12, 19, 21, 26, 33, 35, 40, 47, 49, 54, 61, 63, 68]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 48
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [48]
assert list(pre_row_lst) == [2, 8, 17, 28, 42]
assert list(post_row_lst) == [7, 16, 27, 41, 47]
assert list(pre_segment_lst) == [3, 5, 9, 11, 18, 25, 29, 36, 43, 45]
assert list(post_segment_lst) == [4, 6, 10, 15, 24, 26, 35, 40, 44, 46]
assert list(order_lst) == [12, 14, 19, 21, 23, 30, 32, 34, 37, 39]
assert list(post_order_lst) == [13, 20, 22, 31, 33, 38]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 36
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [36]
assert list(pre_row_lst) == [2, 4, 11, 20, 34]
assert list(post_row_lst) == [3, 10, 19, 33, 35]
assert list(pre_segment_lst) == [5, 12, 21, 28]
assert list(post_segment_lst) == [9, 18, 27, 32]
assert list(order_lst) == [6, 8, 13, 15, 17, 22, 24, 26, 29, 31]
assert list(post_order_lst) == [7, 14, 16, 23, 25, 30]
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_orders(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=15, flexible=test_flexible)
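        # Exactly 15 order records are generated, so a cap of 14 must overflow.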
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=14, flexible=test_flexible)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_logs(self, test_row_wise, test_flexible):
log_order_func = log_flex_order_func_nb if test_flexible else log_order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=15, flexible=test_flexible)
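        # Same boundary for log records: 15 fit, 14 must overflow.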
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=14, flexible=test_flexible)
# ############# Portfolio ############# #
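# Fixtures for the Portfolio tests: close prices containing NaNs, a common
# per-row order-size schedule, one direction per column, and a grouping that
# maps columns 'a'/'b' to 'first' and 'c' to 'second'.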
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'both']
group_by = pd.Index(['first', 'first', 'second'], name='group')
pf = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # independent
pf_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # grouped
pf_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D', attach_call_seq=True
) # shared
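# The tests below exercise three flavors of the same portfolio: per-column (pf),
# grouped without cash sharing (pf_grouped), and grouped with cash sharing (pf_shared).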
class TestPortfolio:
def test_config(self, tmp_path):
pf2 = pf.copy()
pf2._metrics = pf2._metrics.copy()
pf2.metrics['hello'] = 'world'
pf2._subplots = pf2.subplots.copy()
pf2.subplots['hello'] = 'world'
assert vbt.Portfolio.loads(pf2['a'].dumps()) == pf2['a']
assert vbt.Portfolio.loads(pf2.dumps()) == pf2
pf2.save(tmp_path / 'pf')
assert vbt.Portfolio.load(tmp_path / 'pf') == pf2
def test_wrapper(self):
pd.testing.assert_index_equal(
pf.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
price_na.columns
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.grouper.group_by is None
assert pf.wrapper.grouper.allow_enable
assert pf.wrapper.grouper.allow_disable
assert pf.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_grouped.wrapper.columns,
price_na.columns
)
assert pf_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_grouped.wrapper.grouper.group_by,
group_by
)
assert pf_grouped.wrapper.grouper.allow_enable
assert pf_grouped.wrapper.grouper.allow_disable
assert pf_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_shared.wrapper.columns,
price_na.columns
)
assert pf_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_shared.wrapper.grouper.group_by,
group_by
)
assert not pf_shared.wrapper.grouper.allow_enable
assert pf_shared.wrapper.grouper.allow_disable
assert not pf_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert pf['a'].wrapper == pf.wrapper['a']
assert pf['a'].orders == pf.orders['a']
assert pf['a'].logs == pf.logs['a']
assert pf['a'].init_cash == pf.init_cash['a']
pd.testing.assert_series_equal(pf['a'].call_seq, pf.call_seq['a'])
assert pf['c'].wrapper == pf.wrapper['c']
assert pf['c'].orders == pf.orders['c']
assert pf['c'].logs == pf.logs['c']
assert pf['c'].init_cash == pf.init_cash['c']
pd.testing.assert_series_equal(pf['c'].call_seq, pf.call_seq['c'])
assert pf[['c']].wrapper == pf.wrapper[['c']]
assert pf[['c']].orders == pf.orders[['c']]
assert pf[['c']].logs == pf.logs[['c']]
pd.testing.assert_series_equal(pf[['c']].init_cash, pf.init_cash[['c']])
pd.testing.assert_frame_equal(pf[['c']].call_seq, pf.call_seq[['c']])
assert pf_grouped['first'].wrapper == pf_grouped.wrapper['first']
assert pf_grouped['first'].orders == pf_grouped.orders['first']
assert pf_grouped['first'].logs == pf_grouped.logs['first']
assert pf_grouped['first'].init_cash == pf_grouped.init_cash['first']
pd.testing.assert_frame_equal(pf_grouped['first'].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped[['first']].wrapper == pf_grouped.wrapper[['first']]
assert pf_grouped[['first']].orders == pf_grouped.orders[['first']]
assert pf_grouped[['first']].logs == pf_grouped.logs[['first']]
pd.testing.assert_series_equal(
pf_grouped[['first']].init_cash,
pf_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(pf_grouped[['first']].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped['second'].wrapper == pf_grouped.wrapper['second']
assert pf_grouped['second'].orders == pf_grouped.orders['second']
assert pf_grouped['second'].logs == pf_grouped.logs['second']
assert pf_grouped['second'].init_cash == pf_grouped.init_cash['second']
pd.testing.assert_series_equal(pf_grouped['second'].call_seq, pf_grouped.call_seq['c'])
assert pf_grouped[['second']].wrapper == pf_grouped.wrapper[['second']]
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]
assert pf_grouped[['second']].logs == pf_grouped.logs[['second']]
pd.testing.assert_series_equal(
pf_grouped[['second']].init_cash,
pf_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(pf_grouped[['second']].call_seq, pf_grouped.call_seq[['c']])
assert pf_shared['first'].wrapper == pf_shared.wrapper['first']
assert pf_shared['first'].orders == pf_shared.orders['first']
assert pf_shared['first'].logs == pf_shared.logs['first']
assert pf_shared['first'].init_cash == pf_shared.init_cash['first']
pd.testing.assert_frame_equal(pf_shared['first'].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared[['first']].wrapper == pf_shared.wrapper[['first']]
assert pf_shared[['first']].orders == pf_shared.orders[['first']]
assert pf_shared[['first']].logs == pf_shared.logs[['first']]
pd.testing.assert_series_equal(
pf_shared[['first']].init_cash,
pf_shared.init_cash[['first']])
pd.testing.assert_frame_equal(pf_shared[['first']].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared['second'].wrapper == pf_shared.wrapper['second']
assert pf_shared['second'].orders == pf_shared.orders['second']
assert pf_shared['second'].logs == pf_shared.logs['second']
assert pf_shared['second'].init_cash == pf_shared.init_cash['second']
pd.testing.assert_series_equal(pf_shared['second'].call_seq, pf_shared.call_seq['c'])
assert pf_shared[['second']].wrapper == pf_shared.wrapper[['second']]
assert pf_shared[['second']].orders == pf_shared.orders[['second']]
assert pf_shared[['second']].logs == pf_shared.logs[['second']]
pd.testing.assert_series_equal(
pf_shared[['second']].init_cash,
pf_shared.init_cash[['second']])
pd.testing.assert_frame_equal(pf_shared[['second']].call_seq, pf_shared.call_seq[['c']])
def test_regroup(self):
assert pf.regroup(None) == pf
assert pf.regroup(False) == pf
assert pf.regroup(group_by) != pf
pd.testing.assert_index_equal(pf.regroup(group_by).wrapper.grouper.group_by, group_by)
assert pf_grouped.regroup(None) == pf_grouped
assert pf_grouped.regroup(False) != pf_grouped
assert pf_grouped.regroup(False).wrapper.grouper.group_by is None
assert pf_grouped.regroup(group_by) == pf_grouped
assert pf_shared.regroup(None) == pf_shared
with pytest.raises(Exception):
_ = pf_shared.regroup(False)
assert pf_shared.regroup(group_by) == pf_shared
def test_cash_sharing(self):
assert not pf.cash_sharing
assert not pf_grouped.cash_sharing
assert pf_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
pf.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_orders(self):
record_arrays_close(
pf.orders.values,
np.array([
(0, 0, 1, 0.1, 2.02, 0.10202, 0), (1, 0, 2, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 0, 4, 1.0, 5.05, 0.1505, 0), (3, 1, 0, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 1, 3, 0.1, 4.04, 0.10404000000000001, 0),
(6, 1, 4, 1.0, 4.95, 0.14950000000000002, 1), (7, 2, 0, 1.0, 1.01, 0.1101, 0),
(8, 2, 1, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 2, 3, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
pf.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0,
100.0, np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 0, 0, 1, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1,
0.0, 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 0, 0, 2, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
0.0, 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 0, 0, 3, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 0, 0, 4, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0,
0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 1, 1, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 1, 1, 2, 100.97612, -1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999,
-1.0, np.nan, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 1, 1, 3, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
-0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
0.10404000000000001, 0, 0, -1, 5),
(9, 1, 1, 4, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0, 5.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 105.26858, -2.0, 6.03,
93.20857999999998, 5.0, 95.46808, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 2, 2, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01, 0.1,
0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0, 0.0, 98.8799,
1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 2, 2, 1, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001, 1.1,
0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
-1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 2, 2, 3, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 2, 2, 4, 101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.logs.count(),
result
)
def test_entry_trades(self):
record_arrays_close(
pf.entry_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0, -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 1.0, 0, 0.99, 0.10990000000000001, 4, 4.954285714285714,
0.049542857142857145, -4.12372857142857, -4.165382395382394, 1, 0, 2),
(3, 1, 0.1, 1, 1.98, 0.10198, 4, 4.954285714285714, 0.004954285714285714,
-0.4043628571428571, -2.0422366522366517, 1, 0, 2),
(4, 1, 1.0, 4, 4.95, 0.14950000000000002, 4, 4.954285714285714,
0.049542857142857145, -0.20332857142857072, -0.04107647907647893, 1, 0, 2),
(5, 2, 1.0, 0, 1.01, 0.1101, 3, 3.0599999999999996, 0.21241818181818184,
1.727481818181818, 1.71037803780378, 0, 1, 3),
(6, 2, 0.1, 1, 2.02, 0.10202, 3, 3.0599999999999996, 0.021241818181818185,
-0.019261818181818203, -0.09535553555355546, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 3, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_entry_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_entry_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([5, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_entry_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.entry_trades.count(),
result
)
def test_exit_trades(self):
record_arrays_close(
pf.exit_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_exit_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_exit_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_exit_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.exit_trades.count(),
result
)
def test_positions(self):
record_arrays_close(
pf.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0, 2),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
pf.drawdowns.values,
np.array([
(0, 0, 0, 1, 4, 4, 100.0, 99.68951, 99.68951, 0),
(1, 1, 0, 1, 4, 4, 99.8801, 95.26858, 95.26858, 0),
(2, 2, 2, 3, 3, 4, 101.71618000000001, 101.70822000000001, 101.70822000000001, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(pf.close, price_na)
pd.testing.assert_frame_equal(pf_grouped.close, price_na)
pd.testing.assert_frame_equal(pf_shared.close, price_na)
def test_get_filled_close(self):
pd.testing.assert_frame_equal(
pf.get_filled_close(),
price_na.ffill().bfill()
)
def test_asset_flow(self):
pd.testing.assert_frame_equal(
pf.asset_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_flow(),
result
)
def test_assets(self):
pd.testing.assert_frame_equal(
pf.assets(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.assets(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.assets(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.assets(),
result
)
pd.testing.assert_frame_equal(
pf_shared.assets(),
result
)
def test_position_mask(self):
pd.testing.assert_frame_equal(
pf.position_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.position_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.position_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(),
result
)
def test_position_coverage(self):
pd.testing.assert_series_equal(
pf.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
pf.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(),
result
)
def test_cash_flow(self):
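        # free=True reports the flow of cash available for new orders, which
        # excludes proceeds locked by short positions (hence column 'b' differs).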
pd.testing.assert_frame_equal(
pf.cash_flow(free=True),
pd.DataFrame(
np.array([
[0.0, -1.0998999999999999, -1.1201],
[-0.30402, -0.2999800000000002, -0.3040200000000002],
[0.19402999999999998, 0.0, 2.8402999999999996],
[0.0, -0.2920400000000002, 0.29204000000000035],
[-5.2005, -5.0995, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
pf.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
pf_shared.init_cash,
result
)
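        # InitCashMode.Auto infers the initial cash required per column/group,
        # while AutoAlign aligns all of them to the largest inferred amount.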
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
pf.cash(free=True),
pd.DataFrame(
np.array([
[100.0, 98.9001, 98.8799],
[99.69598, 98.60011999999999, 98.57588000000001],
[99.89001, 98.60011999999999, 101.41618000000001],
[99.89001, 98.30807999999999, 101.70822000000001],
[94.68951, 93.20857999999998, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
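        # With in_sim_order=True, cash reflects the shared group balance in the
        # order in which columns are processed during simulation.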
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(),
result
)
def test_asset_value(self):
pd.testing.assert_frame_equal(
pf.asset_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., 2.2, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., -2.2, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[-2.2, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0.0, 0.01000999998999, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.03909759620159034, 0.0],
[0.0, 0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0.0, -0.010214494162927312, 0.010012024441354066],
[0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
[0.0, -0.022821548354919067, 0.002949383274126105],
[0.0, -0.04241418126633477, 0.0],
[0.050155728521486365, -0.12017991413866216, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.00505305454620791, 0.010012024441354066],
[0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
[0.0, -0.011201622483733716, 0.002949383274126105],
[0.0, -0.020585865497718882, 0.0],
[0.025038871596209537, -0.0545825965137659, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00505305454620791, 0.010012024441354066],
[-0.010188689433972452, 0.021830620581035857],
[-0.0112078992458765, 0.002949383274126105],
[-0.02059752492931316, 0.0],
[-0.027337628293439265, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0.0, -0.01000999998999, 0.010012024441354066],
[0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
[0.0, -0.021825370842812494, 0.002949383274126105],
[0.0, -0.03909759620159034, 0.0],
[0.050155728521486365, -0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.005002498748124688, 0.010012024441354066],
[0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
[0.0, -0.010956168751293576, 0.002949383274126105],
[0.0, -0.019771825228137207, 0.0],
[0.025038871596209537, -0.049210520540028384, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005002498748124688, 0.010012024441354066],
[-0.009965205542937988, 0.021830620581035857],
[-0.010962173376438594, 0.002949383274126105],
[-0.019782580537729116, 0.0],
[-0.0246106361476199, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, 98.77612, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, 198.77612, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[198.66613, 198.6721, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[198.66613, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[-3.0049513746473233e-05, 0.0, 9.33060570e-03],
[0.0, -0.011617682390048093, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[-3.0049513746473233e-05, 9.33060570e-03],
[-0.011617682390048093, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(),
result
)
def test_asset_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, 0.0, 0.42740909],
[0., -1.0491090909090908, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[-0.0029850000000000154, 0.42740909],
[-1.0491090909090908, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(),
result
)
def test_benchmark_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(),
result
)
def test_benchmark_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(),
result
)
def test_total_benchmark_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-59.62258787402645, -23.91718815937344]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-13.30950646054953, -19.278625117344564, 12.345065267401496]),
index=price_na.columns
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.information_ratio(group_by=False),
pd.Series(
np.array([-0.9988561334618041, -0.8809478746008806, -0.884780642352239]),
index=price_na.columns
).rename('information_ratio')
)
with pytest.raises(Exception):
_ = pf_shared.information_ratio(pf_shared.benchmark_returns(group_by=False) * 2)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]', 'Worst Trade [%]',
'Avg Winning Trade [%]', 'Avg Losing Trade [%]',
'Avg Winning Trade Duration', 'Avg Losing Trade Duration',
'Profit Factor', 'Expectancy', 'Sharpe Ratio', 'Calmar Ratio',
'Omega Ratio', 'Sortino Ratio'
], dtype='object')
pd.testing.assert_series_equal(
pf.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 98.88877000000001, -1.11123, 283.3333333333333,
2.05906183131983, 0.42223000000000005, 1.6451238489727062, pd.Timedelta('3 days 08:00:00'),
2.0, 1.3333333333333333, 0.6666666666666666, -1.5042060606060605, 33.333333333333336,
-98.38058805880588, -100.8038553855386, 143.91625412541256, -221.34645964596464,
pd.Timedelta('2 days 12:00:00'), pd.Timedelta('2 days 00:00:00'), np.inf, 0.10827272727272726,
-6.751008013903537, 10378.930331014584, 4.768700318817701, 31.599760994679134
]),
index=stats_index,
name='agg_func_mean')
)
pd.testing.assert_series_equal(
pf.stats(column='a'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(freq='10 days', year_freq='200 days')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('50 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('40 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('10 days 00:00:00'), 0.0, -0.10999000000000003,
-3.1151776875290866, -3.981409131683691, 0.0, -2.7478603669149457
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(trade_type='positions')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, | pd.Timedelta('4 days 00:00:00') | pandas.Timedelta |
import nose
import unittest
from numpy import nan
from pandas.core.daterange import DateRange
from pandas.core.index import Index, MultiIndex
from pandas.core.common import rands, groupby
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal)
from pandas.core.panel import WidePanel
from collections import defaultdict
import pandas.core.datetools as dt
import numpy as np
import pandas.util.testing as tm
# unittest.TestCase
def commonSetUp(self):
self.dateRange = DateRange('1/1/2005', periods=250, offset=dt.bday)
self.stringIndex = Index([rands(8).upper() for x in xrange(250)])
self.groupId = Series([x[0] for x in self.stringIndex],
index=self.stringIndex)
self.groupDict = dict((k, v) for k, v in self.groupId.iteritems())
self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])
randMat = np.random.randn(250, 5)
self.stringMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.stringIndex)
self.timeMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.dateRange)
class GroupByTestCase(unittest.TestCase):
setUp = commonSetUp
def test_python_grouper(self):
groupFunc = self.groupDict.get
groups = groupby(self.stringIndex, groupFunc)
setDict = dict((k, set(v)) for k, v in groups.iteritems())
for idx in self.stringIndex:
key = groupFunc(idx)
groupSet = setDict[key]
assert(idx in groupSet)
class TestGroupBy(unittest.TestCase):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B' : ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C' : np.random.randn(8),
'D' : np.random.randn(8)})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
def test_basic(self):
data = Series(np.arange(9) / 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
self.assertEqual(len(v), 3)
agged = grouped.aggregate(np.mean)
self.assertEqual(agged[1], 1)
assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
assert_series_equal(agged, grouped.mean())
# Cython only returning floating point for now...
assert_series_equal(grouped.agg(np.sum).astype(float),
grouped.sum())
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
value_grouped = data.groupby(data)
assert_series_equal(value_grouped.aggregate(np.mean), agged)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
agged = grouped.aggregate({'one' : np.mean,
'two' : np.std})
group_constants = {
0 : 10,
1 : 20,
2 : 30
}
agged = grouped.agg(lambda x: group_constants[x.groupName] + x.mean())
self.assertEqual(agged[1], 21)
# corner cases
self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
def test_series_agg_corner(self):
# nothing to group, all NA
result = self.ts.groupby(self.ts * np.nan).sum()
assert_series_equal(result, Series([]))
def test_aggregate_str_func(self):
def _check_results(grouped):
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg({'A' : 'var', 'B' : 'std', 'C' : 'mean'})
expected = DataFrame({'A' : grouped['A'].var(),
'B' : grouped['B'].std(),
'C' : grouped['C'].mean()})
assert_frame_equal(result, expected)
by_weekday = self.tsframe.groupby(lambda x: x.weekday())
_check_results(by_weekday)
by_mwkday = self.tsframe.groupby([lambda x: x.month,
lambda x: x.weekday()])
_check_results(by_mwkday)
def test_basic_regression(self):
# regression
T = [1.0*x for x in range(1,10) *10][:1095]
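        # NOTE: range(1, 10) * 10 relies on Python 2, where range() returns a list that
        # can be repeated; under Python 3 this expression raises a TypeError.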
result = Series(T, range(0, len(T)))
groupings = np.random.random((1100,))
groupings = Series(groupings, range(0, len(groupings))) * 10.
grouped = result.groupby(groupings)
grouped.mean()
def test_transform(self):
data = Series(np.arange(9) / 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
transformed = grouped.transform(np.mean)
for name, group in grouped:
mean = group.mean()
for idx in group.index:
self.assertEqual(transformed[idx], mean)
def test_dispatch_transform(self):
df = self.tsframe[::5].reindex(self.tsframe.index)
filled = df.groupby(lambda x: x.month).fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_with_na(self):
index = Index(np.arange(10))
values = Series(np.ones(10), index)
labels = Series([nan, 'foo', 'bar', 'bar', nan, nan, 'bar',
'bar', nan, 'foo'], index=index)
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected)
def test_attr_wrapper(self):
grouped = self.ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {}
for name, gp in grouped:
expected[name] = gp.describe()
expected = DataFrame(expected).T
assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
self.assertRaises(AttributeError, getattr, grouped, 'foo')
def test_series_describe_multikey(self):
raise nose.SkipTest
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.describe()
def test_frame_groupby(self):
grouped = self.tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), 5)
self.assertEqual(len(aggregated.columns), 4)
# by string
tscopy = self.tsframe.copy()
tscopy['weekday'] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby('weekday').aggregate(np.mean)
assert_frame_equal(stragged, aggregated)
# transform
transformed = grouped.transform(lambda x: x - x.mean())
self.assertEqual(len(transformed), 30)
self.assertEqual(len(transformed.columns), 4)
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
assert_almost_equal(transformed.xs(idx), mean)
# iterate
for weekday, group in grouped:
self.assert_(group.index[0].weekday() == weekday)
# groups / group_indices
groups = grouped.primary.groups
indices = grouped.primary.indices
for k, v in groups.iteritems():
samething = self.tsframe.index.take(indices[k])
self.assert_(np.array_equal(v, samething))
def test_frame_groupby_columns(self):
mapping = {
'A' : 0, 'B' : 0, 'C' : 1, 'D' : 1
}
grouped = self.tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), len(self.tsframe))
self.assertEqual(len(aggregated.columns), 2)
# transform
tf = lambda x: x - x.mean()
groupedT = self.tsframe.T.groupby(mapping, axis=0)
assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
self.assertEqual(len(v.columns), 2)
# tgroupby
grouping = {
'A' : 0,
'B' : 1,
'C' : 0,
'D' : 1
}
grouped = self.frame.tgroupby(grouping.get, np.mean)
self.assertEqual(len(grouped), len(self.frame.index))
self.assertEqual(len(grouped.columns), 2)
def test_multi_iter(self):
s = Series(np.arange(6))
k1 = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
grouped = s.groupby([k1, k2])
iterated = list(grouped)
expected = [('a', '1', s[[0, 2]]),
('a', '2', s[[1]]),
('b', '1', s[[4]]),
('b', '2', s[[3, 5]])]
for i, (one, two, three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assert_(e1 == one)
self.assert_(e2 == two)
assert_series_equal(three, e3)
def test_multi_iter_frame(self):
k1 = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
df = DataFrame({'v1' : np.random.randn(6),
'v2' : np.random.randn(6),
'k1' : k1, 'k2' : k2},
index=['one', 'two', 'three', 'four', 'five', 'six'])
grouped = df.groupby(['k1', 'k2'])
# things get sorted!
iterated = list(grouped)
idx = df.index
expected = [('a', '1', df.ix[idx[[4]]]),
('a', '2', df.ix[idx[[3, 5]]]),
('b', '1', df.ix[idx[[0, 2]]]),
('b', '2', df.ix[idx[[1]]])]
for i, (one, two, three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assert_(e1 == one)
self.assert_(e2 == two)
assert_frame_equal(three, e3)
# don't iterate through groups with no data
df['k1'] = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
df['k2'] = np.array(['1', '1', '1', '2', '2', '2'])
grouped = df.groupby(['k1', 'k2'])
groups = {}
for a, b, gp in grouped:
groups[a, b] = gp
self.assertEquals(len(groups), 2)
def test_multi_func(self):
col1 = self.df['A']
col2 = self.df['B']
grouped = self.df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = self.df.groupby(['A', 'B']).mean()
assert_frame_equal(agged.ix[:, ['C', 'D']],
expected.ix[:, ['C', 'D']])
# some "groups" with no data
df = DataFrame({'v1' : np.random.randn(6),
'v2' : np.random.randn(6),
'k1' : np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2' : np.array(['1', '1', '1', '2', '2', '2'])},
index=['one', 'two', 'three', 'four', 'five', 'six'])
# only verify that it works for now
grouped = df.groupby(['k1', 'k2'])
grouped.agg(np.sum)
def test_groupby_multiple_columns(self):
data = self.df
grouped = data.groupby(['A', 'B'])
def _check_op(op):
result1 = op(grouped)
expected = defaultdict(dict)
for n1, gp1 in data.groupby('A'):
for n2, gp2 in gp1.groupby('B'):
expected[n1][n2] = op(gp2.ix[:, ['C', 'D']])
expected = dict((k, DataFrame(v)) for k, v in expected.iteritems())
expected = WidePanel.fromDict(expected).swapaxes(0, 1)
# a little bit crude
for col in ['C', 'D']:
result_col = op(grouped[col])
exp = expected[col]
pivoted = result1[col].unstack()
pivoted2 = result_col.unstack()
assert_frame_equal(pivoted.reindex_like(exp), exp)
assert_frame_equal(pivoted2.reindex_like(exp), exp)
_check_op(lambda x: x.sum())
_check_op(lambda x: x.mean())
# test single series works the same
result = data['C'].groupby([data['A'], data['B']]).mean()
expected = data.groupby(['A', 'B']).mean()['C']
assert_series_equal(result, expected)
def test_groupby_multiple_key(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day])
agged = grouped.sum()
assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day], axis=1)
agged = grouped.agg(lambda x: x.sum(1))
assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum(1))
assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(self):
# test that having an all-NA column doesn't mess you up
df = self.df.copy()
df['bad'] = np.nan
agged = df.groupby(['A', 'B']).mean()
expected = self.df.groupby(['A', 'B']).mean()
expected['bad'] = np.nan
assert_frame_equal(agged, expected)
def test_omit_nuisance(self):
grouped = self.df.groupby('A')
result = grouped.mean()
expected = self.df.ix[:, ['A', 'C', 'D']].groupby('A').mean()
assert_frame_equal(result, expected)
def test_nonsense_func(self):
df = DataFrame([0])
self.assertRaises(Exception, df.groupby, lambda x: x + 'foo')
def test_cythonized_aggers(self):
data = {'A' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan],
'B' : ['A', 'B'] * 6,
'C' : np.random.randn(12)}
df = DataFrame(data)
df['C'][2:10:2] = nan
def _testit(op):
# single column
grouped = df.drop(['B'], axis=1).groupby('A')
exp = {}
for cat, group in grouped:
exp[cat] = op(group['C'])
exp = DataFrame({'C' : exp})
result = op(grouped)
assert_frame_equal(result, exp)
# multiple columns
grouped = df.groupby(['A', 'B'])
expd = {}
for cat1, cat2, group in grouped:
expd.setdefault(cat1, {})[cat2] = op(group['C'])
exp = DataFrame(expd).T.stack()
result = op(grouped)['C']
assert_series_equal(result, exp)
_testit(lambda x: x.sum())
_testit(lambda x: x.mean())
def test_grouping_attrs(self):
deleveled = self.mframe.delevel()
grouped = deleveled.groupby(['label_0', 'label_1'])
for i, ping in enumerate(grouped.groupings):
the_counts = self.mframe.groupby(level=i).count()['A']
| assert_almost_equal(ping.counts, the_counts) | pandas.util.testing.assert_almost_equal |
from sanic import Sanic
from jinja2 import Template
import asyncio
import json
from sanic import response
import collections
import pandas as pd
import datetime
import aiohttp
from aiohttp import ClientConnectionError
import math
import random
import pkgutil
import os
BENCHMARK_TICKER = {'HSI': 'HK.800000', 'SPX': 'HK.800000'}
class WebApp:
def __init__(self, max_curve_rows=10000):
self.app = Sanic('Dashboard')
self.app_add_route(app=self.app)
self.hook_ip = None
self.algo_ips = dict()
self.algo_data = collections.defaultdict(lambda: collections.defaultdict(lambda: pd.DataFrame()))
self.algo_curves = collections.defaultdict(lambda: collections.defaultdict(lambda: pd.DataFrame()))
self.failed_algo = dict()
self.benchmark_df = collections.defaultdict(lambda: pd.DataFrame())
self.last_update_time = None
self.max_curve_rows = max_curve_rows
self.port = None
async def update_summary(self, algo_name):
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/summary') as resp:
result = await resp.json()
resp_df = pd.DataFrame(result['return']['content'], index=[0])
self.algo_data[algo_name]['summary'] = self.algo_data[algo_name]['summary'].append(resp_df).drop_duplicates(
['name'])
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_curves(self, algo_name):
if self.algo_curves[algo_name]['PV'].shape[0] == 0:
start_date = '2000-01-01'
else:
start_date = min(self.algo_curves[algo_name]['PV']['x']).strftime('%Y-%m-%d')
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/curves', params={'start_date': start_date}) as resp:
result = await resp.json()
result = result['return']['content']
for curve_type in result.keys():
tmp_df = pd.DataFrame(result[curve_type], index=[0]) if len(result[curve_type]) == 1 else pd.DataFrame(
result[curve_type])
tmp_df['x'] = pd.to_datetime(tmp_df['x'])
self.algo_curves[algo_name][curve_type] = self.algo_curves[algo_name][curve_type].append(tmp_df)
self.algo_curves[algo_name][curve_type] = self.algo_curves[algo_name][curve_type].drop_duplicates(['x'])
if self.algo_curves[algo_name][curve_type].shape[0] >= self.max_curve_rows:
self.algo_curves[algo_name][curve_type] = self.algo_curves[algo_name][curve_type].iloc[
-self.max_curve_rows:]
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_curves[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_positions(self, algo_name):
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/positions') as resp:
result = await resp.json()
resp_df = pd.DataFrame(result['return']['content']['positions'], index=[0]) if len(
result) == 1 else pd.DataFrame(
result['return']['content']['positions'])
self.algo_data[algo_name]['positions'] = resp_df
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_settings(self, algo_name):
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/attributes') as resp:
result = await resp.json()
self.algo_data[algo_name]['settings'] = result['return']['content']
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_pending(self, algo_name):
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/pending') as resp:
result = await resp.json()
resp_df = pd.DataFrame(result['return']['content']['pending_orders'], index=[0]) if len(
result) == 1 else pd.DataFrame(
result['return']['content']['pending_orders'])
self.algo_data[algo_name]['pending'] = resp_df
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_completed(self, algo_name):
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/completed') as resp:
result = await resp.json()
resp_df = pd.DataFrame(result['return']['content']['completed_orders'], index=[0]) if len(
result) == 1 else pd.DataFrame(
result['return']['content']['completed_orders'])
self.algo_data[algo_name]['completed'] = resp_df
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_benchmark_data(self):
earliest_deployment_date = '2000-01-01'
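        # NOTE: seeding with '2000-01-01' means the min() below can never pick a later
        # initialized_date; get_combined_data starts from '2100-01-01' for the same purpose.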
for algo_name in self.algo_ips.keys():
earliest_deployment_date = min(earliest_deployment_date,
self.algo_data[algo_name]['summary']['initialized_date'][0])
for index in BENCHMARK_TICKER.keys():
if self.benchmark_df[index].shape[0] == 0:
async with aiohttp.ClientSession() as session:
async with session.get(self.hook_ip + '/historicals',
params={'ticker': BENCHMARK_TICKER[index], 'datatype': 'K_DAY',
'start_date': earliest_deployment_date,
'from_exchange': 'false'}) as resp:
result = await resp.json()
self.benchmark_df[index] = pd.read_json(result['return']['content'])
else:
start_date = self.benchmark_df[index]['datetime'].iloc[-1].strftime('%Y-%m-%d')
async with aiohttp.ClientSession() as session:
async with session.get(self.hook_ip + '/historicals',
params={'ticker': BENCHMARK_TICKER[index], 'datatype': 'K_DAY',
'start_date': start_date,
'from_exchange': 'false'}) as resp:
result = await resp.json()
tmp_df = pd.read_json(result['return']['content'])
self.benchmark_df[index] = self.benchmark_df[index].append(tmp_df).drop_duplicates(['datetime'])
# --------------------------------------- Return calculations ---------------------------------------------------
@staticmethod
def get_pnl_pct(df, start_date):
if 'x' in df.columns:
x = 'x'
y = 'y'
else:
if 'datetime' not in df.columns:
return 0, 0
x = 'datetime'
y = 'close'
df = df.loc[df[x] >= start_date]
if df.shape[0] == 0:
return 0, 0
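        # Total return over the window, then a CAGR-style annualization:
        # annualized = (1 + pnl_pct) ** (1 / years) - 1, with years measured in 365-day units.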
pnl_pct = df[y].iloc[-1] / df[y].iloc[0] - 1
years = (df[x].iloc[-1] - df[x].iloc[0]).days / 365
annualized_pct = (1 + pnl_pct) ** (1 / years) - 1
return pnl_pct, annualized_pct
@staticmethod
def get_returns(df, start_date=None):
if 'x' in df.columns:
x = 'x'
y = 'y'
else:
if 'datetime' not in df.columns:
return (0, 0), (0, 0)
x = 'datetime'
y = 'close'
if start_date:
df = df.loc[df[x] >= start_date]
if df.shape[0] == 0:
return 0, 0
df[x] = pd.to_datetime(df[x])
df = df.set_index(x)
d_df = df.resample('D').last().dropna()
ytd_pv = d_df[y].iloc[max(-2, -d_df.shape[0])]
d_pct = d_df[y].iloc[-1] / ytd_pv - 1
d_return = d_pct * ytd_pv
m_df = df.resample('M').last()
last_month_pv = m_df[y].iloc[max(-2, -m_df.shape[0])]
m_pct = m_df[y].iloc[-1] / last_month_pv - 1
m_return = m_pct * last_month_pv
return (d_return, d_pct), (m_return, m_pct)
@staticmethod
def calc_returns(pv_df, benchmark_df):
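        # Returns eight groups read off the return statement below: daily (return, pct),
        # monthly (return, pct), benchmark daily (return, pct), benchmark monthly (return, pct),
        # (total_pct, total_bmk_pct), (total_pnl, total_bmk_pnl),
        # (annualized_pct, bmk_annualized_pct) and (sharpe, beta, benchmark_sharpe),
        # all computed on the portfolio-value and benchmark curves aligned by date.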
pv_df['x'] = pd.to_datetime(pd.to_datetime(pv_df['x']).dt.strftime('%Y-%m-%d'))
pv_df = pv_df.set_index('x').resample('D').last().reset_index()
benchmark_df = benchmark_df.rename(columns={'datetime': 'x'})
pv_bmk_df = pv_df.merge(benchmark_df, how='right', on=['x']).dropna()[['x', 'y', 'close']].set_index('x')
if pv_bmk_df.shape[0] <= 1:
return (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0,0,0)
d_return = pv_bmk_df['y'].iloc[-1] - pv_bmk_df['y'].iloc[-2]
d_pct = d_return / pv_bmk_df['y'].iloc[-2]
bmk_pct = pv_bmk_df['close'].iloc[-1] / pv_bmk_df['close'].iloc[-2] - 1
bmk_return = pv_bmk_df['y'].iloc[-2] * bmk_pct
pv_bmk_m_df = pv_bmk_df.resample('M').last()
if pv_bmk_m_df.shape[0] == 1:
m_return = pv_bmk_df['y'].iloc[-1] - pv_bmk_df['y'].iloc[0]
m_pct = m_return / pv_bmk_df['y'].iloc[0]
bmk_m_pct = pv_bmk_df['close'].iloc[-1] / pv_bmk_df['close'].iloc[0] - 1
bmk_m_return = bmk_m_pct * pv_bmk_df['y'].iloc[0]
else:
m_return = pv_bmk_m_df['y'].iloc[-1] - pv_bmk_m_df['y'].iloc[-2]
m_pct = m_return / pv_bmk_m_df['y'].iloc[-2]
bmk_m_pct = pv_bmk_m_df['close'].iloc[-1] / pv_bmk_m_df['close'].iloc[0] - 1
bmk_m_return = bmk_m_pct * pv_bmk_m_df['y'].iloc[0]
total_pnl = pv_bmk_df['y'].iloc[-1] - pv_bmk_df['y'].iloc[0]
total_pct = total_pnl / pv_bmk_df['y'].iloc[0]
total_bmk_pct = pv_bmk_df['close'].iloc[-1] / pv_bmk_df['close'].iloc[0] - 1
total_bmk_pnl = total_bmk_pct * pv_bmk_df['y'].iloc[0]
annualized_pct = (total_pct + 1) ** (365 / (pv_bmk_df.index[-1] - pv_bmk_df.index[0]).days)
bmk_annualized_pct = (total_bmk_pct + 1) ** (365 / (pv_bmk_df.index[-1] - pv_bmk_df.index[0]).days)
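        # NOTE: these two "annualized" values are growth factors, i.e. (1 + total_pct) ** (365 / days)
        # without the trailing "- 1" used in get_pnl_pct, so 1.0 corresponds to a flat year.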
# Sharpe, Beta,
        # TODO: restrict to a true 3-month window; 253 daily rows is roughly one trading year
        three_month_daily_df = pv_bmk_df.iloc[-min(pv_bmk_df.shape[0], 253):].copy()
three_month_daily_df['y_ret'] = three_month_daily_df['y'] / three_month_daily_df['y'].shift(1) - 1
three_month_daily_df['bmk_ret'] = three_month_daily_df['close'] / three_month_daily_df['close'].shift(1) - 1
three_month_daily_df = three_month_daily_df.dropna()
if three_month_daily_df.shape[0] <= 1:
beta = 0
sharpe = 0
benchmark_sharpe = 0
else:
three_month_daily_df = three_month_daily_df[['y_ret', 'bmk_ret']]
beta = three_month_daily_df.cov().iloc[0, 1] / (three_month_daily_df['bmk_ret'].std() ** 2)
sharpe = three_month_daily_df['y_ret'].mean() / three_month_daily_df['y_ret'].std() * (251 ** (1/2))
benchmark_sharpe = three_month_daily_df['bmk_ret'].mean() / three_month_daily_df['bmk_ret'].std()
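            # beta is Cov(strategy daily returns, benchmark daily returns) / Var(benchmark daily returns);
            # the Sharpe ratio above is annualized with sqrt(251) trading days, whereas
            # benchmark_sharpe is left as a plain (unannualized) daily mean/std ratio.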
return (d_return, d_pct), (m_return, m_pct), (bmk_return, bmk_pct), (bmk_m_return, bmk_m_pct), \
(total_pct, total_bmk_pct), (total_pnl, total_bmk_pnl), (annualized_pct, bmk_annualized_pct), (sharpe, beta, benchmark_sharpe)
async def update_returns(self, algo_name):
await self.update_benchmark_data()
benchmark_dfs = self.benchmark_df
algo_benchmark = self.algo_data[algo_name]['summary']['benchmark'][0]
pv_df = self.algo_curves[algo_name]['PV'].copy()
bmk_df = benchmark_dfs[algo_benchmark]
(d_return, d_pct), (m_return, m_pct), (bmk_return, bmk_pct), (bmk_m_return, bmk_m_pct), \
(total_pct, total_bmk_pct), (total_pnl, total_bmk_pnl), (annualized_pct, bmk_annualized_pct), (sharpe, beta, benchmark_sharpe) = self.calc_returns(pv_df, bmk_df)
self.algo_data[algo_name]['summary']['benchmark_net_pnl_pct'] = total_bmk_pct
self.algo_data[algo_name]['summary']['benchmark_annualized_return'] = bmk_annualized_pct
self.algo_data[algo_name]['summary']['daily_return'] = d_return
self.algo_data[algo_name]['summary']['daily_return_pct'] = d_pct
self.algo_data[algo_name]['summary']['monthly_return'] = m_return
self.algo_data[algo_name]['summary']['monthly_return_pct'] = m_pct
self.algo_data[algo_name]['summary']['benchmark_daily_pct'] = bmk_pct
self.algo_data[algo_name]['summary']['benchmark_monthly_pct'] = bmk_m_pct
self.algo_data[algo_name]['summary']['gross_pnl'] = total_pnl
self.algo_data[algo_name]['summary']['gross_pnl_pct'] = total_pct
self.algo_data[algo_name]['summary']['net_pnl'] = total_pnl - self.algo_data[algo_name]['summary']['txn_cost_total']
self.algo_data[algo_name]['summary']['net_pnl_pct'] = self.algo_data[algo_name]['summary']['net_pnl'] / self.algo_data[algo_name]['summary']['initial_capital']
self.algo_data[algo_name]['summary']['annualized_return'] = annualized_pct
self.algo_data[algo_name]['summary']['sharpe'] = sharpe
self.algo_data[algo_name]['summary']['beta'] = beta
self.algo_data[algo_name]['summary']['benchmark_sharpe'] = benchmark_sharpe
# sharpe
# benchmark_sharpe
# sortino
# benchmark_sortino
# win_pct
# benchmark_win_pct
pass
def run(self, port, hook_ip):
loop = asyncio.get_event_loop()
self.port = port
self.hook_ip = hook_ip
async def _run():
tasks = list()
web_server = self.app.create_server(host='0.0.0.0', return_asyncio_server=True, port=port)
tasks.append(web_server)
await asyncio.gather(*tasks)
loop.create_task(_run())
loop.run_forever()
# -------------------------------------------- WebApp ----------------------------------------------------------
async def download_data_from_algos(self, algo_name):
await self.update_summary(algo_name)
await self.update_positions(algo_name)
await self.update_pending(algo_name)
await self.update_completed(algo_name)
await self.update_settings(algo_name)
await self.update_curves(algo_name)
async def get_combined_data(self):
# for algo_name in self.algo_ips.keys():
# await self.update_summary(algo_name)
# await self.update_positions(algo_name)
# await self.update_pending(algo_name)
# await self.update_completed(algo_name)
# await self.update_curves(algo_name)
# await self.update_benchmark_data()
# await self.update_returns(algo_name)
curves = collections.defaultdict(lambda: list())
data = collections.defaultdict(lambda: 0.0)
pending_list = list()
completed_list = list()
positions_list = list()
data['name'] = 'combined'
# TODO: changeable
data['benchmark'] = 'HSI'
data['status'] = 'Running'
data['ip'] = 'http://127.0.0.1:' + str(self.port)
earliest_deployment_date = '2100-01-01'
max_days_since_deployment = 0
for algo_name in self.algo_ips.keys():
# Curves
max_days_since_deployment = max(max_days_since_deployment,
int(self.algo_data[algo_name]['summary']['days_since_deployment'][0]))
earliest_deployment_date = min(earliest_deployment_date,
self.algo_data[algo_name]['summary']['initialized_date'][0])
curves['PV'].append(self.algo_curves[algo_name]['PV'].set_index('x').rename(columns={'y': algo_name}))
curves['EV'].append(self.algo_curves[algo_name]['EV'].set_index('x').rename(columns={'y': algo_name}))
curves['Cash'].append(self.algo_curves[algo_name]['Cash'].set_index('x').rename(columns={'y': algo_name}))
# Values
algo_summary = self.algo_data[algo_name]['summary'].to_dict('records')[0]
data['n_trades'] += algo_summary['n_trades']
data['txn_cost_total'] += algo_summary['txn_cost_total']
data['initial_capital'] += algo_summary['initial_capital']
data['gross_pnl'] += algo_summary['gross_pnl']
data['net_pnl'] += algo_summary['net_pnl']
# Orders & Positions
pending_list.append(self.algo_data[algo_name]['pending'])
completed_list.append(self.algo_data[algo_name]['completed'])
positions_list.append(self.algo_data[algo_name]['positions'])
# Sums up curves for ALL strategies
pv_df = pd.DataFrame(pd.concat(curves['PV'], axis=1).fillna(method='bfill').fillna(method='ffill').sum(axis=1),
columns=['y']).reset_index()
pv_df.columns = ['x', 'y']
pv_df = pv_df.drop_duplicates('x', keep='last')
ev_df = pd.DataFrame(pd.concat(curves['EV'], axis=1).fillna(method='bfill').fillna(method='ffill').sum(axis=1),
columns=['y']).reset_index()
ev_df.columns = ['x', 'y']
ev_df = ev_df.drop_duplicates('x', keep='last')
cash_df = pd.DataFrame( | pd.concat(curves['Cash'], axis=1) | pandas.concat |
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import svm
labeled_images = pd.read_csv('train.csv')
images = labeled_images.iloc[:,1:].astype('float32')
labels = labeled_images.iloc[:,:1]
train_images, test_images,train_labels, test_labels = train_test_split(images, labels, train_size=0.8, test_size=0.2, random_state=42)
train_images /= 255
test_images /= 255
# param_grid = {
# 'C':[1,10,100,1000],
# 'kernel':('linear','rbf'),
# 'gamma':[0.001,0.01,0.1] }
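# A minimal sketch of how the commented-out grid above could be used in place of the
# fixed parameters below (cv/scoring choices are assumptions, not from the original):
# clf = GridSearchCV(svm.SVC(), param_grid, cv=3, n_jobs=-1)
# clf.fit(train_images, train_labels.values.ravel())
# print(clf.best_params_, clf.best_score_)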
clf = svm.SVC(C=10,kernel='rbf',gamma=0.01)
clf.fit(train_images, train_labels.values.ravel())
print(clf.score(test_images, test_labels))
test_data = | pd.read_csv('test.csv') | pandas.read_csv |
from __future__ import division, print_function
import pandas as pd
import random
from timezonefinder import TimezoneFinder
def convert_to_local_time(row):
""" Convert to UTC time
# Under construction
Args: dataframe.row
"""
if row.timezone < 0:
return pd.to_datetime(row.datetime).tz_localize('Etc/GMT+'+str(abs(row.timezone)))
else:
return pd.to_datetime(row.datetime).tz_localize('Etc/GMT-'+str(abs(row.timezone)))
def convert_to_mexico_time(row):
""" Convert from UTC local time to Mexico Time.
# Under construction!
Args: dataframe.row
"""
return pd.to_datetime(row.datetime_local).tz_convert('Mexico/General')
def convert_to_year(row):
""" Temporal solution. Probably a future Bug
# TODO: Fix to a better soluation
Push data to be in one year only Switch requirements.
"""
if row.datetime_mexico.year > 2014:
return (row.datetime_mexico - pd.Timedelta(days=365))
elif row.datetime_mexico.year < 2014:
return (row.datetime_mexico + pd.Timedelta(days=365))
else:
return (row.datetime_mexico)
def get_solar_data(path, filename, *args, **kwargs):
""" Read data from folder and process it.
# Under construction!
Args:
path
filename
"""
data = pd.read_csv(path + filename + '.csv')
data = data.drop(data.columns[0], axis=1)
try:
data.index = | pd.to_datetime(data[['Year', 'Month', 'Day', 'Hour', 'Minute']], utc=True) | pandas.to_datetime |
"""tdub command line interface."""
from __future__ import annotations
# stdlib
import json
import logging
import os
import shutil
from pathlib import PosixPath
# third party
import click
# tdub
from tdub import setup_logging
setup_logging()
log = logging.getLogger("tdub-cli")
@click.group(context_settings=dict(max_content_width=82, help_option_names=['-h', '--help']))
def cli():
"""Top Level CLI function."""
pass
@cli.group("train")
def train():
"""Tasks to perform machine learning steps."""
pass
@cli.group("apply")
def cli_apply():
"""Tasks to apply machine learning models to data."""
pass
@cli.group("rex")
def rex():
"""Tasks interacting with TRExFitter results."""
pass
@cli.group("misc")
def misc():
"""Tasks under a miscellaneous umbrella."""
@train.command("prep")
@click.argument("datadir", type=click.Path(resolve_path=True, exists=True))
@click.argument("region", type=click.Choice(["1j1b", "2j1b", "2j2b"]))
@click.argument("outdir", type=click.Path(resolve_path=True))
@click.option("-p", "--pre-exec", type=click.Path(resolve_path=True), help="Python code to pre-execute")
@click.option("-n", "--nlo-method", type=str, default="DR", help="tW simluation NLO method", show_default=True)
@click.option("-x", "--override-selection", type=str, help="override selection with contents of file")
@click.option("-t", "--use-tptrw", is_flag=True, help="apply top pt reweighting")
@click.option("-r", "--use-trrw", is_flag=True, help="apply top recursive reweighting")
@click.option("-i", "--ignore-list", type=str, help="variable ignore list file")
@click.option("-m", "--multiple-ttbar-samples", is_flag=True, help="use multiple ttbar MC samples")
@click.option("-a", "--use-inc-af2", is_flag=True, help="use inclusive af2 samples")
@click.option("-f", "--bkg-sample-frac", type=float, help="use a fraction of the background")
@click.option("-d", "--use-dilep", is_flag=True, help="train with dilepton samples")
def train_prep(
datadir,
region,
outdir,
pre_exec,
nlo_method,
override_selection,
use_tptrw,
use_trrw,
ignore_list,
multiple_ttbar_samples,
use_inc_af2,
bkg_sample_frac,
use_dilep,
):
"""Prepare data for training."""
if pre_exec is not None:
exec(PosixPath(pre_exec).read_text())
from tdub.ml_train import prepare_from_root, persist_prepared_data
from tdub.data import avoids_for, quick_files
from tdub.frames import drop_cols
qf = quick_files(datadir)
sig_files = qf[f"tW_{nlo_method}"] if use_dilep else qf[f"tW_{nlo_method}_inc"]
if multiple_ttbar_samples:
bkg_files = qf["ttbar_inc_AFII"] + qf["ttbar_PS"]
elif use_inc_af2:
sig_files = qf[f"tW_{nlo_method}_inc_AFII"]
bkg_files = qf["ttbar_inc_AFII"]
else:
bkg_files = qf["ttbar"] if use_dilep else qf["ttbar_inc"]
override_sel = override_selection
if override_sel:
override_sel = PosixPath(override_sel).read_text().strip()
df, y, w = prepare_from_root(
sig_files,
bkg_files,
region,
weight_mean=1.0,
override_selection=override_sel,
use_tptrw=use_tptrw,
use_trrw=use_trrw,
bkg_sample_frac=bkg_sample_frac,
)
drop_cols(df, *avoids_for(region))
if ignore_list:
drops = PosixPath(ignore_list).read_text().strip().split()
drop_cols(df, *drops)
outdir = PosixPath(outdir)
persist_prepared_data(outdir, df, y, w)
(outdir / "region.txt").write_text(f"{region}\n")
(outdir / "nlo_method.txt").write_text(f"{nlo_method}\n")
(outdir / "files_sig.txt").write_text("{}\n".format("\n".join(sig_files)))
(outdir / "files_bkg.txt").write_text("{}\n".format("\n".join(bkg_files)))
@train.command("single")
@click.argument("datadir", type=click.Path(resolve_path=True, exists=True))
@click.argument("outdir", type=click.Path(resolve_path=True))
@click.option("-p", "--pre-exec", type=click.Path(exists=True, resolve_path=True), help="Python code to pre-execute")
@click.option("-s", "--test-size", type=float, default=0.40, help="training test size", show_default=True)
@click.option("-e", "--early-stop", type=int, default=10, help="number of early stopping rounds", show_default=True)
@click.option("-k", "--use-sklearn", is_flag=True, help="use sklearn instead of lgbm")
@click.option("-g", "--use-xgboost", is_flag=True, help="use xgboost instead of lgbm")
@click.option("-l", "--learning-rate", type=float, default=0.1, help="learning_rate model parameter", show_default=True)
@click.option("-n", "--num-leaves", type=int, default=16, help="num_leaves model parameter", show_default=True)
@click.option("-m", "--min-child-samples", type=int, default=500, help="min_child_samples model parameter", show_default=True)
@click.option("-d", "--max-depth", type=int, default=5, help="max_depth model parameter", show_default=True)
@click.option("-r", "--reg-lambda", type=float, default=0, help="lambda (L2) regularization", show_default=True)
@click.option("-a", "--auto-region", is_flag=True, help="Use parameters associated with region", show_default=True)
def train_single(
datadir,
outdir,
pre_exec,
test_size,
early_stop,
use_sklearn,
use_xgboost,
learning_rate,
num_leaves,
min_child_samples,
max_depth,
reg_lambda,
auto_region,
):
"""Execute single training round."""
if pre_exec is not None:
exec(PosixPath(pre_exec).read_text())
from tdub.ml_train import single_training
import pandas as pd
import numpy as np
datadir = PosixPath(datadir)
df = pd.read_hdf(datadir / "df.h5", "df")
y = np.load(datadir / "labels.npy")
w = np.load(datadir / "weights.npy")
df.selection_used = (
datadir / "selection.txt"
).read_text().strip()
extra_sum = {
"region": PosixPath(datadir / "region.txt").read_text().strip(),
"nlo_method": PosixPath(datadir / "nlo_method.txt").read_text().strip(),
}
train_axes = dict(
learning_rate=learning_rate,
num_leaves=num_leaves,
min_child_samples=min_child_samples,
max_depth=max_depth,
reg_lambda=reg_lambda,
)
if auto_region:
from tdub.ml_train import default_bdt_parameters
train_axes = default_bdt_parameters(extra_sum["region"])
else:
train_axes = dict(
learning_rate=learning_rate,
num_leaves=num_leaves,
min_child_samples=min_child_samples,
max_depth=max_depth,
reg_lambda=reg_lambda,
)
single_training(
df,
y,
w,
train_axes,
outdir,
test_size=test_size,
early_stopping_rounds=early_stop,
extra_summary_entries=extra_sum,
use_sklearn=use_sklearn,
use_xgboost=use_xgboost,
)
@train.command("scan")
@click.argument("datadir", type=click.Path(exists=True, resolve_path=True))
@click.argument("workspace", type=click.Path(exists=False))
@click.option("-p", "--pre-exec", type=click.Path(resolve_path=True), help="Python code to pre-execute")
@click.option("-e", "--early-stop", type=int, default=10, help="number of early stopping rounds", show_default=True)
@click.option("-s", "--test-size", type=float, default=0.40, help="training test size", show_default=True)
@click.option("--overwrite", is_flag=True, help="overwrite existing workspace")
@click.option("--and-submit", is_flag=True, help="submit the condor jobs")
def train_scan(
datadir,
workspace,
pre_exec,
early_stop,
test_size,
overwrite,
and_submit,
):
"""Perform a parameter scan via condor jobs.
    DATADIR points to the input ROOT files and all output is saved to
    WORKSPACE.
    $ tdub train scan /data/path scan_2j2b
"""
if pre_exec is not None:
exec(PosixPath(pre_exec).read_text())
from tdub.batch import create_condor_workspace
import tdub.config
import itertools
ws = create_condor_workspace(workspace, overwrite=overwrite)
(ws / "res").mkdir()
runs = []
i = 0
if pre_exec is None:
pre_exec = "_NONE"
else:
pre_exec = str(PosixPath(pre_exec).resolve())
pd = tdub.config.DEFAULT_SCAN_PARAMETERS
itr = itertools.product(
pd.get("max_depth"),
pd.get("num_leaves"),
pd.get("learning_rate"),
pd.get("min_child_samples"),
| pd.get("reg_lambda") | pandas.get |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_url_tls() -> str:
conn = os.environ["POSTGRES_URL_TLS"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_rootcert() -> str:
cert = os.environ["POSTGRES_ROOTCERT"]
return cert
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(postgres_url, query, partition_on="test_int", partition_num=2)
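    # partition_on/partition_num ask connectorx to split the query on the given numeric
    # column into value ranges fetched in parallel and concatenated, so the partitioned
    # result should match the single-query result checked below.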
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(postgres_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(postgres_url: str) -> None:
query = "select MAX(test_int), MIN(test_int) from test_table"
df = read_sql(postgres_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(postgres_url: str) -> None:
query = "select increment(test_int) as test_int from test_table ORDER BY test_int"
df = read_sql(postgres_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df = df.sort_values("test_int").reset_index(drop=True)
| assert_frame_equal(df, expected, check_names=True) | pandas.testing.assert_frame_equal |
import cobra
import re
import numpy as np
import pandas as pd
from cobra.flux_analysis.sampling import OptGPSampler
from cobra.core.reaction import Reaction as cobraReaction
from cobra.util.solver import set_objective
import rpy2.robjects as ro
from rpy2.robjects import numpy2ri
import warnings
import copy
from six import iteritems
from Order_module import FluxOrder
from Data_module import DataParser
from Helper_methods import isCandidatePair
ro.r['source']('Rfunctions.R')
FVA = ro.r['FVA']
sampleFluxCone = ro.r['sampleFluxCone']
numpy2ri.activate()
class Model:
"""
Methods to load the GEM, convert it to the required form and update the
flux bounds to match the carbon source.
"""
def __init__(self, fileName, workDir=None, v_eps=1e-9, verbose=True):
self.workDir = workDir
self.name = ''
self.carbonSource = 'all'
self.__loadModel(fileName, workDir)
self.__enableUptakeOfCarbonSources()
self.__removeBlockedReactions(v_eps)
self.__splitReversibleReactions()
self.__removeReactionsWithZeroUpperBound()
        self.__addMetabolicMacrosystems()
self.geneIDs = [gene.id for gene in self.GEM.genes]
self.numberOfReactions = len(self.GEM.reactions)
self.numberOfMetabolites = len(self.GEM.metabolites)
self.original_lb = self.getLowerBounds()
self.original_ub = self.getUpperBounds()
self.verbose = verbose
print('Split GEM generated with ' + str(self.numberOfReactions)
+ ' non-blocked reactions and ' + str(self.numberOfMetabolites)
+ ' metabolites')
def getLowerBounds(self):
return np.array([rxn.lower_bound for rxn in self.GEM.reactions])
def getUpperBounds(self):
return np.array([rxn.upper_bound for rxn in self.GEM.reactions])
def setLowerBounds(self, lb):
for i, rxn in enumerate(self.GEM.reactions):
rxn.lower_bound = lb[i]
def setUpperBounds(self, ub):
for i, rxn in enumerate(self.GEM.reactions):
rxn.upper_bound = ub[i]
def getStoichiometricMatrix(self):
return np.array(
cobra.util.array.create_stoichiometric_matrix(self.GEM, array_type='dense'))
def getSubsystems(self):
return np.array([rxn.subsystem for rxn in self.GEM.reactions])
def getMacrosystems(self):
return np.array([rxn.macrosystem for rxn in self.GEM.reactions])
def getReactionNames(self):
return [rxn.name for rxn in self.GEM.reactions]
def getReactionIDs(self):
return [rxn.id for rxn in self.GEM.reactions]
def setCarbonSource(self, carbonSource, uptakeRate=20, fractionOfBiomassOptimum=0.95):
"""
        Sets current carbon source: opens importer for carbon source, closes all other
organic imports and maximizes biomass. Wrapper to the two methods that follow.
"""
self.setLowerBounds(self.original_lb)
self.setUpperBounds(self.original_ub)
        if carbonSource.lower() != 'all':
self.updateExchangeReactionBounds(carbonSource, carbonUptakeRate=uptakeRate)
self.setMinimumBiomassProduction(fractionOfOptimum=fractionOfBiomassOptimum)
def __loadModel(self, fileName, workDir=None):
"""
Reads the SBML file containing the GEM. Removes blocked
reactions i.e., reactions that cannot carry flux in Sv = 0, and splits
reversible reactions into two irreversible reactions.
Parameters
----------
fileName: string
The path to the file containing the SBML model
Returns
-------
GEM: cobrapy class model
The transformed genome-scale model
"""
if workDir is not None:
path2File = workDir + '/' + fileName
else:
path2File = fileName
modelName, fileExtension = fileName.split('.')
self.name = modelName
warnings.filterwarnings('ignore')
if fileExtension in ['xml', 'sbml']:
self.GEM = cobra.io.read_sbml_model(path2File)
elif fileExtension == 'json':
self.GEM = cobra.io.load_json_model(path2File)
elif fileExtension == 'mat':
self.GEM = cobra.io.load_matlab_model(path2File)
warnings.resetwarnings()
def __addMetabolicMacrosystems(self):
df = | pd.read_excel(self.workDir + '/' + self.name + '_subsystems.xlsx') | pandas.read_excel |
import unittest
import pandas as pd
import numpy as np
from math import sqrt
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs,
count_parfor_OneD_Vars, count_array_OneD_Vars,
dist_IR_contains)
from datetime import datetime
import random
class TestDate(unittest.TestCase):
@unittest.skip("needs support for boxing/unboxing DatetimeIndex")
def test_datetime_index_in(self):
def test_impl(dti):
return dti
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
dti = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(dti).values, test_impl(dti).values)
def test_datetime_index(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_kw(self):
def test_impl(df):
return pd.DatetimeIndex(data=df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_arg(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_datetime_getitem(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
self.assertEqual(hpat_func(A), test_impl(A))
def test_ts_map(self):
def test_impl(A):
return A.map(lambda x: x.hour)
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date(self):
def test_impl(A):
return A.map(lambda x: x.date())[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date2(self):
def test_impl(df):
return df.apply(lambda row: row.dt_ind.date(), axis=1)[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_ts_map_date_set(self):
def test_impl(df):
df['hpat_date'] = df.dt_ind.map(lambda x: x.date())
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
hpat_func(df)
df['pd_date'] = df.dt_ind.map(lambda x: x.date())
np.testing.assert_array_equal(df['hpat_date'], df['pd_date'])
def test_date_series_unbox(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series().map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_date_series_unbox2(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = | pd.DatetimeIndex(df['str_date']) | pandas.DatetimeIndex |
import unittest
import pandas as pd
import numpy as np
from tickcounter.questionnaire import Encoder, MultiEncoder
from pandas.testing import assert_frame_equal, assert_series_equal
class TestMultiEncoder(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestMultiEncoder, cls).setUpClass()
cls.original = pd.read_csv("test/test_data/mental_health/data.csv")
cls.scale_1 = {"No": -1, "Yes": 1, "Don't know": 0, "Some of them": 0, "Not sure": 0, "Maybe": 0}
cls.scale_2 = {"1-5": 1, "6-25": 2, "26-100": 3, "100-500": 4, "500-1000": 5, "More than 1000": 6}
cls.scale_3 = {"Very difficult": -2, "Somewhat difficult": -1, "Don't know": 0, "Somewhat easy": 1, "Very easy": 2}
cls.scale_4 = {"Never": 1, "Rarely": 2, "Sometimes": 3, "Often": 4}
cls.e1 = Encoder({"No": -1, "Yes": 1, "Don't know": 0, "Some of them": 0, "Not sure": 0, "Maybe": 0}, default=0, neutral=0)
cls.e2 = Encoder({"1-5": 1, "6-25": 2, "26-100": 3, "100-500": 4, "500-1000": 5, "More than 1000": 6})
cls.e3 = Encoder({"Very difficult": -2, "Somewhat difficult": -1, "Don't know": 0, "Somewhat easy": 1, "Very easy": 2}, default=0)
cls.e4 = Encoder({"Never": 1, "Rarely": 2, "Sometimes": 3, "Often": 4}, neutral=3)
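        # The expectations below suggest that `default` supplies the value for entries not
        # present in an encoder's mapping (hence the fillna(0) for e1 and e3); the role of
        # `neutral` is not exercised directly here, so this reading is an assumption.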
cls.col_1 = ['self_employed', 'family_history', 'treatment', 'remote_work',
'tech_company', 'benefits', 'care_options', 'wellness_program',
'seek_help', 'anonymity', 'mental_health_consequence',
'phys_health_consequence', 'coworkers', 'supervisor',
'mental_health_interview', 'phys_health_interview',
'mental_vs_physical', 'obs_consequence']
cls.col_2 = ['no_employees']
cls.col_3 = ['leave']
cls.col_4 = ['work_interfere']
cls.me1 = MultiEncoder([TestMultiEncoder.e1,
TestMultiEncoder.e2,
TestMultiEncoder.e3,
TestMultiEncoder.e4,
])
cls.me2 = MultiEncoder(TestMultiEncoder.e3)
cls.me3 = MultiEncoder(TestMultiEncoder.e1)
def setUp(self):
self.df = TestMultiEncoder.original.copy()
def test_transform_default_ss(self):
result_1 = TestMultiEncoder.me2.transform(self.df['self_employed'])
result_2 = TestMultiEncoder.me1.transform(self.df['self_employed'])
assert_frame_equal(self.df, TestMultiEncoder.original)
expected_1 = self.df['self_employed']
expected_2 = self.df['self_employed'].replace(TestMultiEncoder.scale_1)
expected_2 = expected_2.fillna(0)
assert_series_equal(result_1, expected_1, check_dtype=False)
assert_series_equal(result_2, expected_2, check_dtype=False)
def test_transform_default_df(self):
result = TestMultiEncoder.me1.transform(self.df)
assert_frame_equal(self.df, TestMultiEncoder.original)
expected = self.df.copy()
expected[TestMultiEncoder.col_1] = expected[TestMultiEncoder.col_1].replace(TestMultiEncoder.scale_1)
expected[TestMultiEncoder.col_1] = expected[TestMultiEncoder.col_1].fillna(0)
expected[TestMultiEncoder.col_2] = expected[TestMultiEncoder.col_2].replace(TestMultiEncoder.scale_2)
expected[TestMultiEncoder.col_3] = expected[TestMultiEncoder.col_3].replace(TestMultiEncoder.scale_3)
expected[TestMultiEncoder.col_3] = expected[TestMultiEncoder.col_3].fillna(0)
expected[TestMultiEncoder.col_4] = expected[TestMultiEncoder.col_4].replace(TestMultiEncoder.scale_4)
assert_frame_equal(result, expected, check_dtype=False)
def test_transform_rule_map(self):
pass
def test_transform_ignore_list(self):
ignore_list = ['self_employed', 'family_history', 'benefits', 'work_interfere']
result = TestMultiEncoder.me1.transform(self.df, ignore_list = ignore_list)
assert_frame_equal(self.df, TestMultiEncoder.original)
expected = self.df.copy()
expected[TestMultiEncoder.col_1] = expected[TestMultiEncoder.col_1].replace(TestMultiEncoder.scale_1)
expected[TestMultiEncoder.col_1] = expected[TestMultiEncoder.col_1].fillna(0)
expected[TestMultiEncoder.col_2] = expected[TestMultiEncoder.col_2].replace(TestMultiEncoder.scale_2)
expected[TestMultiEncoder.col_3] = expected[TestMultiEncoder.col_3].replace(TestMultiEncoder.scale_3)
expected[TestMultiEncoder.col_3] = expected[TestMultiEncoder.col_3].fillna(0)
expected[TestMultiEncoder.col_4] = expected[TestMultiEncoder.col_4].replace(TestMultiEncoder.scale_4)
expected[ignore_list] = self.df[ignore_list]
assert_frame_equal(result, expected, check_dtype=False)
def test_transform_return_rules(self):
result, rule = TestMultiEncoder.me2.transform(self.df, return_rule=True)
assert_frame_equal(self.df, TestMultiEncoder.original)
expected = self.df.copy()
expected[TestMultiEncoder.col_3] = expected[TestMultiEncoder.col_3].replace(TestMultiEncoder.scale_3)
expected[TestMultiEncoder.col_3] = expected[TestMultiEncoder.col_3].fillna(0)
expected_rule = pd.Series(index=self.df.columns, dtype=str)
expected_rule[TestMultiEncoder.col_3] = TestMultiEncoder.e3.name
assert_frame_equal(result, expected, check_dtype=False)
assert_series_equal(rule, expected_rule)
def test_transform_mode(self):
result_1 = TestMultiEncoder.me1.transform(self.df, mode='any')
result_2, rule = TestMultiEncoder.me1.transform(self.df, mode='strict', return_rule=True)
assert_frame_equal(self.df, TestMultiEncoder.original)
expected_1 = self.df.copy()
expected_1[TestMultiEncoder.col_1] = expected_1[TestMultiEncoder.col_1].replace(TestMultiEncoder.scale_1)
expected_1[TestMultiEncoder.col_1] = expected_1[TestMultiEncoder.col_1].fillna(0)
expected_1[TestMultiEncoder.col_2] = expected_1[TestMultiEncoder.col_2].replace(TestMultiEncoder.scale_2)
expected_1[TestMultiEncoder.col_3] = expected_1[TestMultiEncoder.col_3].replace(TestMultiEncoder.scale_3)
expected_1[TestMultiEncoder.col_3] = expected_1[TestMultiEncoder.col_3].fillna(0)
expected_1[TestMultiEncoder.col_4] = expected_1[TestMultiEncoder.col_4].replace(TestMultiEncoder.scale_4)
expected_2 = expected_1.copy()
expected_2[TestMultiEncoder.col_1] = TestMultiEncoder.original[TestMultiEncoder.col_1]
expected_rule = pd.Series(index=self.df.columns, dtype=str)
expected_rule[TestMultiEncoder.col_2] = TestMultiEncoder.e2.name
expected_rule[TestMultiEncoder.col_3] = TestMultiEncoder.e3.name
expected_rule[TestMultiEncoder.col_4] = TestMultiEncoder.e4.name
assert_frame_equal(result_1, expected_1, check_dtype=False)
| assert_frame_equal(result_2, expected_2, check_dtype=False) | pandas.testing.assert_frame_equal |
""" ``SNPs`` reads, writes, merges, and remaps genotype / raw data files.
"""
"""
BSD 3-Clause License
Copyright (c) 2019, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from itertools import groupby, count
import logging
import os
import re
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from snps.ensembl import EnsemblRestClient
from snps.resources import Resources
from snps.io import Reader, Writer, get_empty_snps_dataframe
from snps.utils import Parallelizer
logger = logging.getLogger(__name__)
class SNPs:
def __init__(
self,
file="",
only_detect_source=False,
assign_par_snps=False,
output_dir="output",
resources_dir="resources",
deduplicate=True,
deduplicate_XY_chrom=True,
deduplicate_MT_chrom=True,
parallelize=False,
processes=os.cpu_count(),
rsids=(),
):
""" Object used to read, write, and remap genotype / raw data files.
Parameters
----------
file : str or bytes
path to file to load or bytes to load
only_detect_source : bool
only detect the source of the data
assign_par_snps : bool
assign PAR SNPs to the X and Y chromosomes
output_dir : str
path to output directory
resources_dir : str
name / path of resources directory
deduplicate : bool
deduplicate RSIDs and make SNPs available as `SNPs.duplicate`
deduplicate_XY_chrom : bool
deduplicate alleles in the non-PAR regions of X and Y for males; see
`SNPs.discrepant_XY`
deduplicate_MT_chrom : bool
deduplicate alleles on MT; see `SNPs.heterozygous_MT`
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
rsids : tuple, optional
rsids to extract if loading a VCF file
"""
self._file = file
self._only_detect_source = only_detect_source
self._snps = get_empty_snps_dataframe()
self._duplicate = get_empty_snps_dataframe()
self._discrepant_XY = get_empty_snps_dataframe()
self._heterozygous_MT = get_empty_snps_dataframe()
self._discrepant_vcf_position = get_empty_snps_dataframe()
self._discrepant_merge_positions = pd.DataFrame()
self._discrepant_merge_genotypes = pd.DataFrame()
self._source = []
self._phased = False
self._build = 0
self._build_detected = False
self._output_dir = output_dir
self._resources = Resources(resources_dir=resources_dir)
self._parallelizer = Parallelizer(parallelize=parallelize, processes=processes)
if file:
d = self._read_raw_data(file, only_detect_source, rsids)
# Replace multiple rsids separated by commas in index with the first rsid. E.g. rs1,rs2 -> rs1
multi_rsids = {
multi_rsid: multi_rsid.split(",")[0]
for multi_rsid in list(
filter(lambda x: len(x.split(",")) > 1, d["snps"].index)
)
}
d["snps"].rename(index=multi_rsids, inplace=True)
self._snps = d["snps"]
self._source = (
d["source"].split(", ") if ", " in d["source"] else [d["source"]]
)
self._phased = d["phased"]
if not self._snps.empty:
self.sort()
if deduplicate:
self._deduplicate_rsids()
# prefer to use SNP positions to detect build
self._build = self.detect_build()
self._build_detected = True if self._build else False
if not self._build_detected:
# use build detected from `read` method or comments, if any
self._build = d["build"]
if not self._build:
self._build = 37 # assume Build 37 / GRCh37 if not detected
else:
self._build_detected = True
if assign_par_snps:
self._assign_par_snps()
self.sort()
if deduplicate_XY_chrom:
if self.determine_sex() == "Male":
self._deduplicate_XY_chrom()
if deduplicate_MT_chrom:
self._deduplicate_MT_chrom()
else:
logger.warning("no SNPs loaded...")
def __repr__(self):
return f"SNPs({self._file[0:50]!r})"
@property
def source(self):
""" Summary of the SNP data source(s).
Returns
-------
str
Data source(s) for this ``SNPs`` object, separated by ", ".
"""
return ", ".join(self._source)
@property
def snps(self):
""" Normalized SNPs.
Notes
-----
Throughout ``snps``, the "normalized ``snps`` dataframe" is defined as follows:
============= =================================== ===============
Column Description `pandas` dtype
============= =================================== ===============
rsid [*]_ SNP ID object (string)
chrom Chromosome of SNP object (string)
pos Position of SNP (relative to build) uint32
genotype [*]_ Genotype of SNP object (string)
============= =================================== ===============
.. [*] Dataframe index
.. [*] Genotype can be null, length 1, or length 2. Specifically, genotype is null if not
called or unavailable. Otherwise, for autosomal chromosomes, genotype is two alleles.
For the X and Y chromosomes, male genotypes are one allele in the non-PAR regions
(assuming `deduplicate_XY_chrom`). For the MT chromosome, genotypes are one allele
(assuming `deduplicate_MT_chrom`).
Returns
-------
pandas.DataFrame
normalized ``snps`` dataframe
"""
return self._snps
@property
def duplicate(self):
""" Duplicate SNPs.
A duplicate SNP has the same RSID as another SNP. The first occurrence
of the RSID is not considered a duplicate SNP.
Returns
-------
pandas.DataFrame
normalized ``snps`` dataframe
"""
return self._duplicate
@property
def discrepant_XY(self):
""" Discrepant XY SNPs.
A discrepant XY SNP is a heterozygous SNP in the non-PAR region of the X
or Y chromosome found during deduplication for a detected male genotype.
Returns
-------
pandas.DataFrame
normalized ``snps`` dataframe
"""
return self._discrepant_XY
@property
def heterozygous_MT(self):
""" Heterozygous SNPs on the MT chromosome found during deduplication.
Returns
-------
pandas.DataFrame
normalized ``snps`` dataframe
"""
return self._heterozygous_MT
@property
def discrepant_vcf_position(self):
""" SNPs with discrepant positions discovered while saving VCF.
Returns
-------
pandas.DataFrame
normalized ``snps`` dataframe
"""
return self._discrepant_vcf_position
@property
def discrepant_merge_positions(self):
""" SNPs with discrepant positions discovered while merging SNPs.
Notes
-----
Definitions of columns in this dataframe are as follows:
============== ===========
Column Description
============== ===========
rsid SNP ID
chrom Chromosome of existing SNP
pos Position of existing SNP
genotype Genotype of existing SNP
chrom_added Chromosome of added SNP
pos_added Position of added SNP (discrepant with pos)
genotype_added Genotype of added SNP
============== ===========
Returns
-------
pandas.DataFrame
"""
return self._discrepant_merge_positions
@property
def discrepant_merge_genotypes(self):
""" SNPs with discrepant genotypes discovered while merging SNPs.
Notes
-----
Definitions of columns in this dataframe are as follows:
=============== ===========
Column Description
=============== ===========
rsid SNP ID
chrom Chromosome of existing SNP
pos Position of existing SNP
genotype Genotype of existing SNP
chrom_added Chromosome of added SNP
pos_added Position of added SNP
genotype_added Genotype of added SNP (discrepant with genotype)
=============== ===========
Returns
-------
pandas.DataFrame
"""
return self._discrepant_merge_genotypes
@property
def discrepant_merge_positions_genotypes(self):
""" SNPs with discrepant positions and / or genotypes discovered while merging SNPs.
Notes
-----
Definitions of columns in this dataframe are as follows:
=============== ===========
Column Description
=============== ===========
rsid SNP ID
chrom Chromosome of existing SNP
pos Position of existing SNP
genotype Genotype of existing SNP
chrom_added Chromosome of added SNP
pos_added Position of added SNP (possibly discrepant with pos)
genotype_added Genotype of added SNP (possibly discrepant with genotype)
=============== ===========
Returns
-------
pandas.DataFrame
"""
df = self._discrepant_merge_positions.append(self._discrepant_merge_genotypes)
if len(df) > 1:
df = df.drop_duplicates()
return df
@property
def build(self):
""" Build of SNPs.
Returns
-------
int
"""
return self._build
@property
def build_detected(self):
""" Status indicating if build of SNPs was detected.
Returns
-------
bool
"""
return self._build_detected
@property
def assembly(self):
""" Assembly of SNPs.
Returns
-------
str
"""
if self.build == 37:
return "GRCh37"
elif self.build == 36:
return "NCBI36"
elif self.build == 38:
return "GRCh38"
else:
return ""
@property
def count(self):
""" Count of SNPs.
Returns
-------
int
"""
return self.get_count()
@property
def chromosomes(self):
""" Chromosomes of SNPs.
Returns
-------
list
list of str chromosomes (e.g., ['1', '2', '3', 'MT']); empty list if no chromosomes
"""
if not self._snps.empty:
return list(pd.unique(self._snps["chrom"]))
else:
return []
@property
def chromosomes_summary(self):
""" Summary of the chromosomes of SNPs.
Returns
-------
str
human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes
"""
if not self._snps.empty:
chroms = list(pd.unique(self._snps["chrom"]))
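# --- Illustrative sketch (assumption: not the library's exact implementation) ---
# The groupby/count imports above are what allow consecutive chromosome numbers
# to be collapsed into a human-readable summary such as '1-3, 5, MT':
from itertools import groupby, count

def summarize_chroms(chroms):
    ints = sorted(int(c) for c in chroms if c.isdigit())
    others = [c for c in chroms if not c.isdigit()]
    counter = count()
    parts = []
    # consecutive integers share the same (value - position) key
    for _, run in groupby(ints, key=lambda n: n - next(counter)):
        run = list(run)
        parts.append(str(run[0]) if len(run) == 1 else f"{run[0]}-{run[-1]}")
    return ", ".join(parts + others)

print(summarize_chroms(["1", "2", "3", "5", "MT"]))  # '1-3, 5, MT'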
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
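# --- Illustrative example (not part of pandas itself) ---
# The dispatch above is what makes the plain Index constructor hand back
# specialized subclasses depending on the input:
import pandas as pd

print(type(pd.Index(range(5))).__name__)                         # RangeIndex
print(type(pd.Index(pd.Categorical(["a", "b", "a"]))).__name__)  # CategoricalIndex
# On the pandas generation this module belongs to, integer data yields an
# Int64Index; recent pandas versions return a plain Index with int64 dtype.
print(type(pd.Index([1, 2, 3])).__name__)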
from itertools import combinations
from pandas import Categorical, concat
from scipy.stats import spearmanr
de_labels = {
'deseq2_Rachel': 'DESeq2 from dr Rachel',
**{
method.lower().replace('-', '_') + id_suffix: method + suffix
for method in ['DESeq2', 'voom-TMM', 'voom-RLE', 'voom-EEE', 'voom-qtotal']
for id_suffix, suffix in {
'_no_filter': ' without filtering',
'_filtered': ' filtered',
'_reproduction': ' reproduced',
'_weighted': ' weighted',
'_shrinkage_normal': ' "normal" shrinkage',
'_shrinkage_apeglm': ' "apeglm" shrinkage',
'_shrinkage_ashr': ' "ashr" shrinkage'
}.items()
}
}
deseq_cols = ['pvalue', 'padj']
voom_cols = {
'P.Value': 'pvalue',
'adj.P.Val': 'padj'
}
def significant_set(p_values, threshold=0.05):
return set(p_values.query(f'padj < {threshold}').index)
def significant_union(p_values, a, b, threshold=0.05):
return (
significant_set(p_values[a], threshold)
|
significant_set(p_values[b], threshold)
)
def get_ranks(p_values, method, subset, labels):
return (
p_values[method]
.dropna()
.reindex(subset)
.rename_axis('gene').reset_index()
.sort_values(['padj', 'pvalue', 'gene'])
.set_index('gene')
.rank(method='dense', numeric_only=True)
.assign(method=labels[method])
).rename(columns={'pvalue': 'pvalue_rank'})
def corr_label(res):
return f'corr = {res.correlation:.2f}, p = {res.pvalue:.2f}'
def generate_comparison(p_values, methods, threshold=0.05, labels=de_labels, method=spearmanr, nan_policy='omit', rename=None):
df = concat([
concat([ranks_a, ranks_b]).assign(
contrast=f'{labels[a]} - {labels[b]}',
corr=corr_label(method(
ranks_a.loc[significant_in_either]['pvalue_rank'],
ranks_b.loc[significant_in_either]['pvalue_rank'],
nan_policy=nan_policy
))
)
for a, b in combinations(sorted(methods), 2)
for a, b in [sorted([a, b])]
for significant_in_either in [significant_union(p_values, a, b, threshold)]
for ranks_a, ranks_b in [[
get_ranks(p_values, a, significant_in_either, labels).assign(side=-1),
get_ranks(p_values, b, significant_in_either, labels).assign(side=+1)
]]
]).reset_index()
if rename is not None:
df['name'] = rename.loc[df.gene].values
df = df.sort_values('method')
df['method'] = Categorical(df['method'], ordered=True)
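# --- Illustrative example (toy ranks, not real DE results) ---
# corr_label() above is just a formatted Spearman rank correlation between two
# methods' p-value ranks over the union of their significant genes:
res = spearmanr([1, 2, 3, 4, 5], [2, 1, 3, 5, 4], nan_policy='omit')
print(corr_label(res))  # roughly 'corr = 0.80, p = 0.10'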
import itertools
from datetime import timedelta
import pandas as pd
from pandas.tseries.frequencies import to_offset
from pandarallel.utils.tools import chunk, PROGRESSION
class RollingGroupBy:
@staticmethod
def reduce(results, _):
return pd.concat(results, copy=False)
@staticmethod
def get_chunks(nb_workers, rolling_groupby, *args, **kwargs):
chunks = chunk(len(rolling_groupby._groupby), nb_workers)
iterator = iter(rolling_groupby._groupby)
for chunk_ in chunks:
yield [next(iterator) for _ in range(chunk_.stop - chunk_.start)]
@staticmethod
def att2value(rolling):
attributes = {
attribute: getattr(rolling, attribute) for attribute in rolling._attributes
}
# Fix window for win_type == "freq": in that case the window was defined by the user in a
# format like '1D' and refers to a time-based rolling window
if "win_type" in attributes and attributes["win_type"] == "freq":
window = to_offset(timedelta(microseconds=int(attributes["window"] / 1000)))
attributes["window"] = window
attributes.pop("win_type")
return attributes
@staticmethod
def worker(
tuples, index, attribute2value, queue, progress_bar, func, *args, **kwargs
):
# TODO: See if this pd.concat is avoidable
results = []
for iteration, (name, df) in enumerate(tuples):
item = df.rolling(**attribute2value).apply(func, *args, **kwargs)
item.index = pd.MultiIndex.from_product([[name], item.index])
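# --- Illustrative example (not part of pandarallel) ---
# Prepending the group key as an extra index level, as done above, reproduces
# the index layout of a regular pandas rolling-groupby result:
example = pd.Series([1.0, 2.0], index=[10, 11])
example.index = pd.MultiIndex.from_product([["group_a"], example.index])
print(example)  # index becomes ('group_a', 10), ('group_a', 11)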
import os
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from tools.utils import threshold_raw_values
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error
from tools.utils import rmse_parameters
def main(
model_outputs: pd.DataFrame,
gt_column: str,
save_dir: str,
save_name: str,
) -> None:
metric_fns = {
"MAE": mean_absolute_error,
"MSE": mean_squared_error,
"RMSE": rmse_parameters(squared=False),
"R2": r2_score,
}
metrics = {
"Threshold": [],
"MAE": [],
"MSE": [],
"RMSE": [],
"R2": [],
}
gt_values = model_outputs[gt_column]
thresholds = [t * 0.01 for t in range(0, 101)]
for _, threshold in enumerate(thresholds):
metrics["Threshold"].append(threshold)
threshold_values = model_outputs.apply(
threshold_raw_values,
threshold=threshold,
inference_columns=["lung_segment_" + str(idx+1) for idx in range(6)],
axis=1,
)
threshold_values = np.array(threshold_values)
for metric_name, metrics_fn in metric_fns.items():
metrics[metric_name].append(metrics_fn(gt_values, threshold_values))
save_path = os.path.join(save_dir, save_name)
df_metrics = pd.DataFrame(metrics)
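# --- Illustrative example (toy values; assumes a scikit-learn version that
# supports mean_squared_error(..., squared=False)) ---
# Each sweep iteration above reduces to scoring (ground truth, thresholded
# prediction) pairs, for example:
gt = np.array([0, 1, 2, 3])
pred = np.array([0, 1, 1, 3])
print(mean_absolute_error(gt, pred))                # 0.25
print(mean_squared_error(gt, pred, squared=False))  # RMSE = 0.5
print(r2_score(gt, pred))                           # 0.8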
from __future__ import absolute_import, division, print_function
from .argus_shapes import is_singlestim_dataframe
from . import imgproc
from . import utils
from . import fast_models as fm
import os
import abc
import six
import pickle
import numpy as np
import pandas as pd
import pulse2percept.implants as p2pi
import pulse2percept.utils as p2pu
import scipy.stats as spst
import sklearn.base as sklb
import sklearn.exceptions as skle
import sklearn.metrics as sklm
@six.add_metaclass(abc.ABCMeta)
class BaseModel(sklb.BaseEstimator):
__is_frozen = False
def __init__(self, **kwargs):
# The following parameters serve as default values and can be
# overwritten via `kwargs`
# The model operates on an electrode array, but we cannot instantiate
# it here since we might pass the array's location as search params.
# So we save the array type and set some default values for its
# location:
self.implant_type = p2pi.ArgusII
self.implant_x = 0
self.implant_y = 0
self.implant_rot = 0
# Current maps are thresholded to produce a binary image:
self.img_thresh = 1.0 / np.sqrt(np.e)
# JobLib or Dask can be used to parallelize computations:
self.engine = 'joblib'
self.scheduler = 'threading'
self.n_jobs = -1
# We will store the current map for each electrode in a dict: Since we
# are usually fitting to individual drawings, we don't want to
# recompute the current maps for the same electrode on each trial.
self._curr_map = {}
# Other variables to be set later:
self.implant = None
self.xret = None
self.yret = None
self.name = 'BaseModel'
self.eye = 'RE'
# This flag will be flipped once the ``fit`` method was called
self._is_fitted = False
# Additional parameters can be set using ``_sets_default_params``
self._sets_default_params()
# From here on out, adding more class attributes is not allowed:
self.__is_frozen = True
# Overwrite default parameters with kwargs:
self.set_params(**kwargs)
def __setattr__(self, key, value):
if self.__is_frozen and key not in dir(self):
err = ("%s is a frozen class. You cannot add new attributes "
"such as `%s` to it. Use ``get_params`` to see a dict "
"of class attributes." % (type(self).__name__, key))
raise ValueError(err)
super(BaseModel, self).__setattr__(key, value)
def get_params(self, deep=True):
"""Returns all params that can be set on-the-fly via 'set_params'"""
return {'implant_type': self.implant_type,
'implant_x': self.implant_x,
'implant_y': self.implant_y,
'implant_rot': self.implant_rot,
'img_thresh': self.img_thresh,
'eye': self.eye,
'engine': self.engine,
'scheduler': self.scheduler,
'n_jobs': self.n_jobs}
def _sets_default_params(self):
"""Derived classes can set additional default parameters here"""
pass
def _ename(self, electrode):
"""Returns electrode name with zeros trimmed"""
return '%s%d' % (electrode[0], int(electrode[1:]))
@abc.abstractmethod
def build_ganglion_cell_layer(self):
"""Builds the ganglion cell layer"""
raise NotImplementedError
def build_optic_fiber_layer(self):
"""Builds the optic fiber layer"""
pass
@abc.abstractmethod
def _calcs_el_curr_map(self, electrode):
"""Must return a tuple `current_map`"""
raise NotImplementedError
def calc_curr_map(self, X):
# Calculate current maps only if necessary:
# - Get a list of all electrodes for which we already have a curr map,
# but trim the zeros before the number, e.g. 'A01' => 'A1'
has_el = set([self._ename(k) for k in self._curr_map.keys()])
# - Compare with electrodes in `X` to find the ones we don't have,
# but trim the zeros:
if is_singlestim_dataframe(X):
wants_el = set([self._ename(e) for e in set(X.electrode)])
else:
wants_el = set([self._ename(e) for e in set(X.electrode1)])
wants_el |= set([self._ename(e) for e in set(X.electrode2)])  # set union; '+=' is not defined for sets
needs_el = wants_el.difference(has_el)
# - Calculate the current maps for the missing electrodes (parallel
# makes things worse - overhead?)
for el in needs_el:
self._curr_map[el] = self._calcs_el_curr_map(el)
def fit(self, X, y=None, **fit_params):
"""Fits the model"""
if not isinstance(X, pd.core.frame.DataFrame):
raise TypeError("'X' must be a pandas DataFrame, not %s" % type(X))
if y is not None and not isinstance(y, pd.core.frame.DataFrame):
raise TypeError("'y' must be a pandas DataFrame, not %s" % type(y))
if np.abs(self.implant_rot) > 2 * np.pi:
raise ValueError("Implant rotation should be in radians.")
# Set additional parameters:
self.set_params(**fit_params)
# Instantiate implant:
if not isinstance(self.implant_type, type):
raise TypeError(("'implant_type' must be a type, not "
"'%s'." % type(self.implant_type)))
self.implant = self.implant_type(x_center=self.implant_x,
y_center=self.implant_y,
rot=self.implant_rot)
# Build the ganglion cell layer:
self.build_ganglion_cell_layer()
# Build the ganglion axon layer (optional):
self.build_optic_fiber_layer()
# Calculate current spread for every electrode in `X`:
self.calc_curr_map(X)
# Inform the object that is has been fitted:
self._is_fitted = True
return self
def predict_image(self, electrode):
"""Predicts a single data point"""
if not isinstance(electrode, six.string_types):
raise TypeError("`electrode` must be a string.")
# Calculate current map with method from derived class:
if self._ename(electrode) not in self._curr_map:
X = {'electrode': self._ename(electrode)}
self.calc_curr_map(pd.DataFrame([X]))
curr_map = self._curr_map[self._ename(electrode)]
if not isinstance(curr_map, np.ndarray):
raise TypeError(("Method '_curr_map' must return a np.ndarray, "
"not '%s'." % type(curr_map)))
return curr_map
@abc.abstractmethod
def _predicts_target_values(self, electrode, img):
"""Must return a dict of predicted values, e.g {'image': img}"""
raise NotImplementedError
def _predicts(self, Xrow):
_, row = Xrow
curr_map = self.predict_image(row['electrode'])
# Rescale output if specified:
out_shape = None
if hasattr(row, 'img_shape'):
out_shape = row['img_shape']
elif hasattr(row, 'image'):
out_shape = row['image'].shape
# Apply threshold to arrive at binarized image:
if not hasattr(self, 'img_thresh'):
raise ValueError("DataFrame must have a column 'img_thresh'")
img = imgproc.get_thresholded_image(curr_map, thresh=self.img_thresh,
out_shape=out_shape)
return self._predicts_target_values(row['electrode'], img)
def predict(self, X):
"""Compute predicted drawing"""
if not self._is_fitted:
raise skle.NotFittedError("This model is not fitted yet. Call "
"'fit' with appropriate arguments "
"before using this method.")
if not isinstance(X, pd.core.frame.DataFrame):
raise TypeError("`X` must be a pandas DataFrame, not %s" % type(X))
# Make sure we calculated the current maps for all electrodes in `X`:
self.calc_curr_map(X)
# Predict percept
engine = 'serial' if self.engine == 'cython' else self.engine
y_pred = p2pu.parfor(self._predicts, X.iterrows(),
engine=engine, scheduler=self.scheduler,
n_jobs=self.n_jobs)
# Convert to DataFrame, preserving the index of `X` (otherwise
# subtraction in the scoring function produces nan)
return pd.DataFrame(y_pred, index=X.index)
import os
import zipfile
from pathlib import Path
import pandas as pd
from fashiondatasets.utils.list import parallel_map
from fashionscrapper.utils.list import distinct
from tqdm.auto import tqdm
from fashionnets.evaluate.helper.cbir_helper import build_similar_idxs
class HistoryHelper_DF2:
"""
Helper Class to Extract and analyze all History Files from within the Result::Zips - as generated by the
ZipResults::Callback
"""
def __init__(self, base_path):
self.base_path = base_path
def aggregate_run_histories(self, run):
run_folder = os.path.join(self.base_path, run)
if not os.path.isdir(run_folder):
return None
files = os.listdir(run_folder)
zips = filter(lambda d: d.endswith(".zip"), files)
zips = map(lambda d: os.path.join(self.base_path, run, d), zips)
list_of_history_lines = map(read_history, zips)
list_of_history_lines = list(list_of_history_lines)
lines = []
for history in list_of_history_lines:
for line in history:
line = line.replace("\r", "").replace(".", ",")
if line not in lines and len(line) > 0:
lines.append(line)
if len(lines) < 1:
return None
header = lines[0].split(";")
values = [line.split(";") for line in lines[1:]]
df = pd.DataFrame(values, columns=header)
csv_path = os.path.join(self.base_path, run, "train_aggregated.csv")
df.to_csv(csv_path, index=False, sep=";", decimal=".")
return run, csv_path
# noinspection DuplicatedCode
def aggregate(self):
runs = os.listdir(self.base_path)
csv_paths = map(self.aggregate_run_histories, runs)
csv_paths = filter(lambda d: d is not None, csv_paths)
run_with_history = map(lambda r: (r[0], pd.read_csv(r[1], sep=";", decimal=".")), csv_paths)
best_checkpoints = map(load_best_checkpoints, run_with_history)
return list(best_checkpoints)
def aggregate_as_df(self):
aggregated_results = self.aggregate()
def flatten_aggregated(line):
metrics = ["min_loss", "min_val_loss"]
values = [line[metrics[0]]["run_name"], line[metrics[0]]["total_epochs"]]
for m in metrics:
for k in ["value", "idx", "epoch"]:
values.append(line[m][k])
return values
header = ["run_name", "total_epoch"] + ["value", "idx", "epoch"] + ["val_value", "val_idx", "val_epoch"]
data = [flatten_aggregated(x) for x in aggregated_results]
data_frame = pd.DataFrame(data, columns=header)
data_frame.to_csv(os.path.join(self.base_path, "best_runs.csv"), index=False, sep=";", decimal=".")
return data_frame
def extract_best_backbones(self):
runs = os.listdir(self.base_path)
csv_paths = map(self.aggregate_run_histories, runs)
csv_paths = filter(lambda d: d is not None, csv_paths)
run_with_history = map(lambda r: (r[0], pd.read_csv(r[1], sep=";", decimal=".")), csv_paths)
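# --- Illustrative sketch (assumption: load_best_checkpoints, not shown here,
# picks the epoch with the lowest loss / validation loss) ---
history = pd.DataFrame({"epoch": [0, 1, 2],
                        "loss": [0.9, 0.5, 0.6],
                        "val_loss": [1.0, 0.7, 0.8]})
best_idx = history["val_loss"].idxmin()
print(history.loc[best_idx, ["epoch", "val_loss"]])  # epoch 1, val_loss 0.7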
import sys
sys.path.append("../AIF360/")
import numpy as np
from tot_metrics import TPR, TNR
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc
from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools\
import OptTools
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from aif360.datasets import StandardDataset
import warnings
import pandas as pd
warnings.simplefilter("ignore")
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
def get_distortion_compas(vold, vnew):
"""Distortion function for the compas dataset. We set the distortion
metric here. See section 4.3 in supplementary material of
http://papers.nips.cc/paper/6988-optimized-pre-processing-for-discrimination-prevention
for an example
Note:
Users can use this as templates to create other distortion functions.
Args:
vold (dict) : {attr:value} with old values
vnew (dict) : dictionary of the form {attr:value} with new values
Returns:
d (value) : distortion value
"""
# Distortion cost
distort = {}
distort['two_year_recid'] = pd.DataFrame(
{'No recid.': [0., 2.],
'Did recid.': [2., 0.]},
index=['No recid.', 'Did recid.'])
distort['age_cat'] = pd.DataFrame(
{'Less than 25': [0., 1., 2.],
'25 to 45': [1., 0., 1.],
'Greater than 45': [2., 1., 0.]},
index=['Less than 25', '25 to 45', 'Greater than 45'])
distort['c_charge_degree'] = pd.DataFrame(
{'M': [0., 2.],
'F': [1., 0.]},
index=['M', 'F'])
distort['priors_count'] = pd.DataFrame(
{'0': [0., 1., 2., 100.],
'1 to 3': [1., 0., 1., 100.],
'More than 3': [2., 1., 0., 100.],
'missing': [0., 0., 0., 1.]},
index=['0', '1 to 3', 'More than 3', 'missing'])
distort['score_text'] = pd.DataFrame(
{'Low': [0., 2.],
'MediumHigh': [2., 0.]},
index=['Low', 'MediumHigh'])
distort['sex'] = pd.DataFrame(
{0.0: [0., 2.],
1.0: [2., 0.]},
index=[0.0, 1.0])
distort['race'] = pd.DataFrame(
{0.0: [0., 2.],
1.0: [2., 0.]},
index=[0.0, 1.0])
total_cost = 0.0
for k in vold:
if k in vnew:
total_cost += distort[k].loc[vnew[k], vold[k]]
return total_cost
default_mappings = {
'label_maps': [{1.0: 'Did recid.', 0.0: 'No recid.'}],
'protected_attribute_maps': [{0.0: 'Male', 1.0: 'Female'},
{1.0: 'Caucasian', 0.0: 'Not Caucasian'}]
}
def default_preprocessing(df):
"""Perform the same preprocessing as the original analysis:
https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
"""
return df[(df.days_b_screening_arrest <= 30)
& (df.days_b_screening_arrest >= -30)
& (df.is_recid != -1)
& (df.c_charge_degree != 'O')
& (df.score_text != 'N/A')]
class CompasDataset(StandardDataset):
"""ProPublica COMPAS Dataset.
See :file:`aif360/data/raw/compas/README.md`.
"""
def __init__(
self,
label_name='two_year_recid',
favorable_classes=[0],
protected_attribute_names=[
'sex',
'race'],
privileged_classes=[
['Female'],
['Caucasian']],
instance_weights_name=None,
categorical_features=[
'age_cat',
'c_charge_degree',
'c_charge_desc'],
features_to_keep=[
'sex',
'age',
'age_cat',
'race',
'juv_fel_count',
'juv_misd_count',
'juv_other_count',
'priors_count',
'c_charge_degree',
'c_charge_desc',
'two_year_recid',
'length_of_stay'],
features_to_drop=[],
na_values=[],
custom_preprocessing=default_preprocessing,
metadata=default_mappings):
def quantizePrior1(x):
if x <= 0:
return 0
elif 1 <= x <= 3:
return 1
else:
return 2
def quantizeLOS(x):
if x <= 7:
return 0
if 8 < x <= 93:
return 1
else:
return 2
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
filepath = 'data/compas/compas-scores-two-years.csv'
df = pd.read_csv(filepath, index_col='id', na_values=[])
df['age_cat'] = df['age_cat'].replace('Greater than 45', 2)
df['age_cat'] = df['age_cat'].replace('25 - 45', 1)
df['age_cat'] = df['age_cat'].replace('Less than 25', 0)
df['score_text'] = df['score_text'].replace('High', 1)
df['score_text'] = df['score_text'].replace('Medium', 1)
df['score_text'] = df['score_text'].replace('Low', 0)
df['priors_count'] = df['priors_count'].apply(
lambda x: quantizePrior1(x))
df['length_of_stay'] = (pd.to_datetime(df['c_jail_out']) - pd.to_datetime(df['c_jail_in'])).apply(lambda x: x.days)  # length of stay in days
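# --- Illustrative example (toy dates, not COMPAS rows) ---
# Subtracting two datetime columns yields a timedelta whose day count is what
# quantizeLOS() above is meant to bin:
toy = pd.DataFrame({"c_jail_in": ["2013-01-01"], "c_jail_out": ["2013-01-10"]})
days = (pd.to_datetime(toy["c_jail_out"]) - pd.to_datetime(toy["c_jail_in"])).dt.days
print(days.iloc[0])  # 9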
#!/usr/bin/env python
#
# Copyright (C) 2019
# <NAME>
# Centre of Excellence Cognitive Interaction Technology (CITEC)
# Bielefeld University
#
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
from shutil import rmtree
import numpy as np
import pandas as pd
from PIL import Image
import io
from base64 import encodebytes
def create_directory_if_not_defined(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def delete_files_in_directory(dir,recursive=False):
for the_file in os.listdir(dir):
file_path = os.path.join(dir, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path) and recursive: rmtree(file_path)
except Exception as e:
print(e)
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def setup_clean_directory(dir):
create_directory_if_not_defined(dir)
delete_files_in_directory(dir,recursive=True)
def get_files_of_type(path, type='jpg'):
return np.array([x for x in sorted(os.listdir(path)) if x.lower().endswith(type.lower())])
def get_files_filtered(path, regex):
import re
matches = []
pattern = re.compile(regex)
for file in get_files_of_type(path,''):
if pattern.match(file):
matches.append(file)
return np.array(matches)
def get_subdirectories(path):
return os.walk(path).__next__()[1]
def array_is_in_array(arr1,arr2):
rtn = np.zeros(arr1.shape, dtype='bool')
for e in arr2:
rtn = np.logical_or(rtn,arr1 == e)
return rtn
def encode_img(img):
pil_img = Image.fromarray(img) # reads the PIL image
byte_arr = io.BytesIO()
pil_img.save(byte_arr, format='PNG') # convert the PIL image to byte array
encoded_img = encodebytes(byte_arr.getvalue()).decode('ascii') # encode as base64
return encoded_img
def get_intersection(arr1, arr2):
rtn = []
for a1 in arr1:
if a1 in arr2:
rtn.append(a1)
return rtn
def get_union(arr1, arr2):
return list(set(arr1) | set(arr2))
def get_elements_not_in(arr1, arr2):
'''returns elements from arr1 that are not in arr2'''
rtn = []
for a1 in arr1:
if not a1 in arr2:
rtn.append(a1)
return rtn
def write_image(img,file):
if np.max(img) <= 1: # is float array
pil_img = Image.fromarray((img * 255).astype(np.uint8))
else:
pil_img = Image.fromarray(img.astype(np.uint8))
pil_img.save(file)
# db helper
def init_db(fields,index=None):
'init db with the specified fields which is a tuple of tuples consisting of the type and name of the field'
#assert len(columns) == len(types)
df = pd.DataFrame(index=None)
for t,c in fields:
df[c] = pd.Series(dtype=t)
if index is not None:
df = df.set_index(index)
return df
def export_db_as_csv(df, export_path):
df.to_csv(export_path)
def add_db_rows(df, add_dict):
types1 = df.dtypes
was_empty = True if len(df) == 0 else False
if df.index.name is None:
df = df.append(add_dict, ignore_index=True)
else:
df = df.append(pd.Series(add_dict,name=add_dict[df.index.name])).drop_duplicates(subset=df.index.name, keep='last').sort_index()
types2 = df.dtypes
if not was_empty and not (types1 == types2).all():
raise TypeError('Typechange detected in data base')
else:
return df
def read_csv(path):
return np.array(pd.read_csv(path, header=None))
def save_csv(data, path):
pd.DataFrame(data).to_csv(path, header=False, index=False)  # mirror read_csv above: raw values, no header/index
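# --- Illustrative usage (not part of the original module; relies on
# DataFrame.append, i.e. the pre-2.0 pandas this helper targets) ---
db = init_db(fields=(("str", "name"), ("float", "score")))
db = add_db_rows(db, {"name": "sample_01", "score": 0.5})
db = add_db_rows(db, {"name": "sample_02", "score": 0.7})
print(db)  # two typed rows: name (object), score (float64)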
from cytokit import io as cytokit_io
import os.path as osp
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
def aggregate(config, output_dir):
"""Aggregate cytometry data associated with an experiment into a single dataframe
Args:
config: Experiment configuration
output_dir: Output directory for experiment
Returns:
DataFrame containing concatenation of all tile-based cytometry datasets with a global
cell id as well as global x/y coordinates (where "global" means across region)
"""
# Load per-tile csv exports
df = []
for idx in config.get_tile_indices():
path = cytokit_io.get_cytometry_stats_path(idx.region_index, idx.tile_x, idx.tile_y)
path = osp.join(output_dir, path)
if not osp.exists(path):
logger.warning(
'Expected cytometry data file at "%s" does not exist. '
'It will be ignored but this may be worth investigating', path
)
continue
df.append(pd.read_csv(path))
df = pd.concat(df)
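# --- Illustrative example (toy tile tables, not real cytometry exports) ---
# The aggregation above is a plain row-wise concatenation of per-tile frames:
tile_a = pd.DataFrame({"cell_id": [0, 1], "region_index": [0, 0]})
tile_b = pd.DataFrame({"cell_id": [0], "region_index": [1]})
print(len(pd.concat([tile_a, tile_b])))  # 3 rows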
'''
This module contains all functions relating to feature engineering
'''
import pandas as pd
import numpy as np
from .structdata import get_cat_feats, get_num_feats, get_date_cols
def drop_missing(data=None, percent=99):
'''
Drops columns with more than [percent] percent of missing data.
Parameters:
-------------------------
data: Pandas DataFrame or Series.
percent: float, Default 99
Percentage of missing values to be in a column before it is eligible for removal.
Returns:
Pandas DataFrame or Series.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
missing_percent = (data.isna().sum() / data.shape[0]) * 100
cols_2_drop = missing_percent[missing_percent.values > percent].index
print("Dropped {}".format(list(cols_2_drop)))
#Drop missing values
data.drop(cols_2_drop, axis=1, inplace=True)
def drop_redundant(data):
'''
Removes features with the same value in all cells. Also drops a feature if NaN is the second unique class.
Parameters:
-----------------------------
data: DataFrame or named series.
Returns:
DataFrame or named series.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
#get columns
cols_2_drop = _nan_in_class(data)
print("Dropped {}".format(cols_2_drop))
data.drop(cols_2_drop, axis=1, inplace=True)
def _nan_in_class(data):
cols = []
for col in data.columns:
if len(data[col].unique()) == 1:
cols.append(col)
if len(data[col].unique()) == 2:
if np.nan in list(data[col].unique()):
cols.append(col)
return cols
def fill_missing_cats(data=None, cat_features=None, missing_encoding=None):
'''
Fill missing values using the mode of the categorical features.
Parameters:
------------------------
data: DataFrame or name Series.
Data set to perform operation on.
cat_features: List, Series, Array.
categorical features to perform operation on. If not provided, we automatically infer the categoricals from the dataset.
missing_encoding: List, Series, Array.
Values used in place of missing. Popular formats are [-1, -999, -99, '', ' ']
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if cat_features is None:
cat_features = get_cat_feats(data)
temp_data = data.copy()
#change all possible missing values to NaN
if missing_encoding is None:
missing_encoding = ['', ' ', -99, -999]
temp_data.replace(missing_encoding, np.NaN, inplace=True)
for col in cat_features:
most_freq = temp_data[col].mode()[0]
temp_data[col] = temp_data[col].replace(np.NaN, most_freq)
return temp_data
def fill_missing_num(data=None, features=None, method='mean'):
'''
fill missing values in numerical columns with specified [method] value
Parameters:
------------------------------
data: DataFrame or name Series.
The data set to fill
features: list.
List of columns to fill
method: str, Default 'mean'.
method to use in calculating fill value.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if features is None:
#get numerical features with missing values
num_feats = get_num_feats(data)
temp_data = data[num_feats].isna().sum()
features = list(temp_data[num_feats][temp_data[num_feats] > 0].index)
print("Found {} with missing values.".format(features))
for feat in features:
if method == 'mean':
mean = data[feat].mean()
data[feat].fillna(mean, inplace=True)
elif method == 'median':
median = data[feat].median()
data[feat].fillna(median, inplace=True)
elif method == 'mode':
mode = data[feat].mode()[0]
data[feat].fillna(mode, inplace=True)
return "Filled all missing values successfully"
def merge_groupby(data=None, cat_features=None, statistics=None, col_to_merge=None):
'''
Performs a groupby on the specified categorical features and merges
the result to the original dataframe.
Parameter:
-----------------------
data: DataFrame
Data set to perform operation on.
cat_features: list, series, 1D-array
categorical features to groupby.
statistics: list, series, 1D-array, Default ['mean', 'count]
aggregates to perform on grouped data.
col_to_merge: str
The column to merge on the dataset. Must be present in the data set.
Returns:
Dataframe.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if statistics is None:
statistics = ['mean', 'count']
if cat_features is None:
cat_features = get_cat_feats(data)
if col_to_merge is None:
raise ValueError("col_to_merge: Expecting a string [column to merge on], got 'None'")
df = data.copy()
for cat in cat_features:
temp = df.groupby([cat]).agg(statistics)[col_to_merge]
#rename columns
temp = temp.rename(columns={'mean': cat + '_' + col_to_merge + '_mean', 'count': cat + '_' + col_to_merge + "_count"})
#merge the data sets
df = df.merge(temp, how='left', on=cat)
return df
def get_qcut(data=None, col=None, q=None, duplicates='drop', return_type='float64'):
'''
Cuts a series into bins using the pandas qcut function
and returns the resulting bins as a series for merging.
Parameter:
-------------
data: DataFrame, named Series
Data set to perform operation on.
col: str
column to cut/binarize.
q: integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.
duplicates: Default 'drop',
If bin edges are not unique drop non-uniques.
return_type: dtype, Default (float64)
Dtype of series to return. One of [float64, str, int64]
Returns:
--------
Series, 1D-Array
'''
temp_df = pd.qcut(data[col], q=q, duplicates=duplicates).to_frame().astype('str')
#retrieve only the qcut categories
df = temp_df[col].str.split(',').apply(lambda x: x[0][1:]).astype(return_type)
return df
def create_balanced_data(data=None, target=None, categories=None, class_sizes=None, replacement=False ):
'''
Creates a balanced data set from an imbalanced one. Used in a classification task.
Parameter:
----------------------------
data: DataFrame, name series.
The imbalanced dataset.
target: str
Name of the target column.
categories: list
Unique categories in the target column. If not set, we infer the unique categories in the column.
class_sizes: list
Size of each specified class. Must be in order with categoriess parameter.
replacement: bool, Default False.
samples with or without replacement.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if target is None:
raise ValueError("target: Expecting a String got 'None'")
if categories is None:
categories = list(data[target].unique())
if class_sizes is None:
#set size for each class to same value
temp_val = int(data.shape[0] / len(data[target].unique()))
class_sizes = [temp_val for _ in list(data[target].unique())]
temp_data = data.copy()
data_category = []
data_class_indx = []
#get data corrresponding to each of the categories
for cat in categories:
data_category.append(temp_data[temp_data[target] == cat])
#sample and get the index corresponding to each category
for class_size, cat in zip(class_sizes, data_category):
data_class_indx.append(cat.sample(class_size, replace=replacement).index)
#concat data together
new_data = pd.concat([temp_data.loc[indx] for indx in data_class_indx], ignore_index=True)
return new_data
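# --- Illustrative usage (toy frame; uses the helper defined above) ---
toy = pd.DataFrame({"feature": range(6),
                    "label": ["a", "a", "a", "a", "b", "b"]})
balanced = create_balanced_data(toy, target="label", class_sizes=[2, 2])
print(balanced["label"].value_counts())  # 2 rows per class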
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/2 21:08
Desc: 同花顺-数据中心-技术选股
http://data.10jqka.com.cn/rank/cxg/
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.datasets import get_ths_js
def _get_file_content_ths(file: str = "ths.js") -> str:
"""
获取 JS 文件的内容
:param file: JS 文件名
:type file: str
:return: 文件内容
:rtype: str
"""
setting_file_path = get_ths_js(file)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def stock_rank_cxg_ths(symbol: str = "创月新高") -> pd.DataFrame:
"""
同花顺-数据中心-技术选股-创新高
http://data.10jqka.com.cn/rank/cxg/
:param symbol: choice of {"创月新高", "半年新高", "一年新高", "历史新高"}
:type symbol: str
:return: 创新高数据
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新高": "4",
"半年新高": "3",
"一年新高": "2",
"历史新高": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期高点", "前期高点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期高点日期"] = pd.to_datetime(big_df["前期高点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期高点"] = pd.to_numeric(big_df["前期高点"])
return big_df
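# --- Illustrative example (toy frame; shows the "%"-string cleanup used by the
# functions in this module, no network request involved) ---
_demo = pd.DataFrame({"涨跌幅": ["5.20%", "-1.10%"]})
_demo["涨跌幅"] = pd.to_numeric(_demo["涨跌幅"].str.strip("%"))  # -> 5.2, -1.1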
def stock_rank_cxd_ths(symbol: str = "创月新低") -> pd.DataFrame:
"""
同花顺-数据中心-技术选股-创新低
http://data.10jqka.com.cn/rank/cxd/
:param symbol: choice of {"创月新低", "半年新低", "一年新低", "历史新低"}
:type symbol: str
:return: 创新低数据
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新低": "4",
"半年新低": "3",
"一年新低": "2",
"历史新低": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期低点", "前期低点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期低点日期"] = pd.to_datetime(big_df["前期低点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期低点"] = pd.to_numeric(big_df["前期低点"])
return big_df
def stock_rank_lxsz_ths() -> pd.DataFrame:
"""
同花顺-数据中心-技术选股-连续上涨
http://data.10jqka.com.cn/rank/lxsz/
:return: 连续上涨
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"收盘价",
"最高价",
"最低价",
"连涨天数",
"连续涨跌幅",
"累计换手率",
"所属行业",
]
big_df["连续涨跌幅"] = big_df["连续涨跌幅"].str.strip("%")
big_df["累计换手率"] = big_df["累计换手率"].str.strip("%")
big_df["连续涨跌幅"] = pd.to_numeric(big_df["连续涨跌幅"])
big_df["累计换手率"] = pd.to_numeric(big_df["累计换手率"])
big_df["收盘价"] = pd.to_numeric(big_df["收盘价"])
big_df["最高价"] = pd.to_numeric(big_df["最高价"])
big_df["最低价"] = pd.to_numeric(big_df["最低价"])
big_df["连涨天数"] = pd.to_numeric(big_df["连涨天数"])
return big_df
def stock_rank_lxxd_ths() -> pd.DataFrame:
"""
同花顺-数据中心-技术选股-连续下跌
http://data.10jqka.com.cn/rank/lxxd/
:return: 连续下跌
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxxd/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxxd/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"收盘价",
"最高价",
"最低价",
"连涨天数",
"连续涨跌幅",
"累计换手率",
"所属行业",
]
big_df["连续涨跌幅"] = big_df["连续涨跌幅"].str.strip("%")
big_df["累计换手率"] = big_df["累计换手率"].str.strip("%")
big_df["连续涨跌幅"] = pd.to_numeric(big_df["连续涨跌幅"])
big_df["累计换手率"] = pd.to_numeric(big_df["累计换手率"])
big_df["收盘价"] = pd.to_numeric(big_df["收盘价"])
big_df["最高价"] = pd.to_numeric(big_df["最高价"])
big_df["最低价"] = pd.to_numeric(big_df["最低价"])
big_df["连涨天数"] = pd.to_numeric(big_df["连涨天数"])
return big_df
def stock_rank_cxfl_ths() -> pd.DataFrame:
"""
同花顺-数据中心-技术选股-持续放量
http://data.10jqka.com.cn/rank/cxfl/
:return: 持续放量
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxfl/field/count/order/desc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxfl/field/count/order/desc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"涨跌幅",
"最新价",
"成交量",
"基准日成交量",
"放量天数",
"阶段涨跌幅",
"所属行业",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["阶段涨跌幅"] = big_df["阶段涨跌幅"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["阶段涨跌幅"] = pd.to_numeric(big_df["阶段涨跌幅"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["放量天数"] = pd.to_numeric(big_df["放量天数"])
return big_df
def stock_rank_cxsl_ths() -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Picking - Sustained Volume Shrinkage
http://data.10jqka.com.cn/rank/cxsl/
:return: sustained volume-shrinkage ranking table
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxsl/field/count/order/desc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxsl/field/count/order/desc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"涨跌幅",
"最新价",
"成交量",
"基准日成交量",
"缩量天数",
"阶段涨跌幅",
"所属行业",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["阶段涨跌幅"] = big_df["阶段涨跌幅"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["阶段涨跌幅"] = pd.to_numeric(big_df["阶段涨跌幅"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["缩量天数"] = pd.to_numeric(big_df["缩量天数"])
return big_df
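# Minimal usage sketch for the ranking helpers above. It assumes network access to
# data.10jqka.com.cn and that the bundled ths.js file is available; the _demo_* name
# is illustrative only.
def _demo_ths_rank_tables():
    falling = stock_rank_lxxd_ths()      # consecutive-decline ranking
    volume_up = stock_rank_cxfl_ths()    # sustained volume-increase ranking
    volume_down = stock_rank_cxsl_ths()  # sustained volume-shrinkage ranking
    return falling.head(), volume_up.head(), volume_down.head()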
def stock_rank_xstp_ths(symbol: str = "500日均线") -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Picking - Upward Breakout
http://data.10jqka.com.cn/rank/xstp/
:param symbol: moving-average line to break above; choice of {"5日均线", "10日均线", "20日均线", "30日均线", "60日均线", "90日均线", "250日均线", "500日均线"} (N-day moving averages)
:type symbol: str
:return: upward-breakout ranking table
:rtype: pandas.DataFrame
"""
symbol_map = {
"5日均线": 5,
"10日均线": 10,
"20日均线": 20,
"30日均线": 30,
"60日均线": 60,
"90日均线": 90,
"250日均线": 250,
"500日均线": 500,
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xstp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xstp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"最新价",
"成交额",
"成交量",
"涨跌幅",
"换手率",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["换手率"] = big_df["换手率"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_ | numeric(big_df["涨跌幅"]) | pandas.to_numeric |
import pandas as pd
import os
from decimal import Decimal
# from sklearn.cluster import KMeans,DBSCAN
import datetime,json,time,random
from math import radians, cos, sin, asin, sqrt,degrees
from impala.dbapi import connect
from sqlalchemy import create_engine
import MySQLdb
from collections import OrderedDict
from retrying import retry
import redis
from core.conf import config
from core.redis_helper import Logger_Redis,RedisHelper
| pd.set_option('display.width', 400) | pandas.set_option |
"""
.. module:: others
:synopsis: Others Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin
class DailyReturnIndicator(IndicatorMixin):
"""Daily Return (DR)
Args:
close(pandas.Series): dataset 'Close' column.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, fillna: bool = False):
self._close = close
self._fillna = fillna
self._run()
def _run(self):
self._dr = (self._close / self._close.shift(1, fill_value=self._close.mean())) - 1
self._dr *= 100
def daily_return(self) -> pd.Series:
"""Daily Return (DR)
Returns:
pandas.Series: New feature generated.
"""
dr = self._check_fillna(self._dr, value=0)
return pd.Series(dr, name='d_ret')
class DailyLogReturnIndicator(IndicatorMixin):
"""Daily Log Return (DLR)
https://stackoverflow.com/questions/31287552/logarithmic-returns-in-pandas-dataframe
Args:
close(pandas.Series): dataset 'Close' column.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, fillna: bool = False):
self._close = close
self._fillna = fillna
self._run()
def _run(self):
self._dr = np.log(self._close).diff()
self._dr *= 100
def daily_log_return(self) -> pd.Series:
"""Daily Log Return (DLR)
Returns:
pandas.Series: New feature generated.
"""
dr = self._check_fillna(self._dr, value=0)
return pd.Series(dr, name='d_logret')
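# Minimal usage sketch for the two indicators above, driven by a small synthetic
# close-price series; the _demo_* helper is illustrative only.
def _demo_return_indicators():
    close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0], name='Close')
    dr = DailyReturnIndicator(close=close, fillna=True).daily_return()
    dlr = DailyLogReturnIndicator(close=close, fillna=True).daily_log_return()
    return pd.concat([close, dr, dlr], axis=1)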
class CumulativeReturnIndicator(IndicatorMixin):
"""Cumulative Return (CR)
Args:
close(pandas.Series): dataset 'Close' column.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, fillna: bool = False):
self._close = close
self._fillna = fillna
self._run()
def _run(self):
self._cr = (self._close / self._close.iloc[0]) - 1
self._cr *= 100
def cumulative_return(self) -> pd.Series:
"""Cumulative Return (CR)
Returns:
pandas.Series: New feature generated.
"""
cr = self._check_fillna(self._cr, value=-1)
return | pd.Series(cr, name='cum_ret') | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
BdPRC_MD.py
Bd-RPC
(Bases dependent Rapid Phylogenetic Clustering)
MAKE DATABASE
Author: <NAME>
'''
#####Make Database function
def calcuate_bases_frequency(aligned_seq_location):
from Bio import SeqIO
A = []
C = []
G = []
T = []
seq_len = 0 ##test whether aligned
seq_id = []
sequence_out = []
for seq_record in SeqIO.parse(aligned_seq_location,'fasta'):
seq_id.append(seq_record.id)
sequence = seq_record.seq.lower() ####change (A C G T) to (a c g t)
A.append(sequence.count('a'))
C.append(sequence.count('c'))
G.append(sequence.count('g'))
T.append(sequence.count('t'))
###test aligned sequence
if seq_len == 0:
seq_len = len(sequence)
if seq_len != len(sequence):
exit(print('Please input aligned sequences'))
sequence_out.append(sequence)
###########################
freq_A = sum(A)/(sum(A)+sum(C)+sum(G)+sum(T))
freq_C = sum(C)/(sum(A)+sum(C)+sum(G)+sum(T))
freq_G = sum(G)/(sum(A)+sum(C)+sum(G)+sum(T))
freq_T = sum(T)/(sum(A)+sum(C)+sum(G)+sum(T))
print ('Frequency A : '+ str(freq_A) +'\n'+
'Frequency C : '+ str(freq_C) +'\n'+
'Frequency G : '+ str(freq_G) +'\n'+
'Frequency T : '+ str(freq_T))
return [freq_A,freq_C,freq_G,freq_T],seq_id,sequence_out
def bases_convert (pi, sequence, convert_rule_location = '' ):
import numpy as np
from Bio import SeqIO
if convert_rule_location == '':
A = np.array([1,0,0,0,1,0])* (1-pi[0])
C = np.array([0,1,0,0,0,1])* (1-pi[1])
G = np.array([0,0,1,0,1,0])* (1-pi[2])
T = np.array([0,0,0,1,0,1])* (1-pi[3])
# A = np.array([1,0,0,0,1,0])
# C = np.array([0,1,0,0,0,1])
# G = np.array([0,0,1,0,1,0])
# T = np.array([0,0,0,1,0,1])
# A = np.array([1,0,0,0])* (1-pi[0])
# C = np.array([0,1,0,0])* (1-pi[1])
# G = np.array([0,0,1,0])* (1-pi[2])
# T = np.array([0,0,0,1])* (1-pi[3])
# A = np.array([1,0,0,0])
# C = np.array([0,1,0,0])
# G = np.array([0,0,1,0])
# T = np.array([0,0,0,1])
# A = np.array([1,0,0,0,1,0])* (pi[0])
# C = np.array([0,1,0,0,0,1])* (pi[1])
# G = np.array([0,0,1,0,1,0])* (pi[2])
# T = np.array([0,0,0,1,0,1])* (pi[3])
# A = np.array([1,0,0,0])* (pi[0])
# C = np.array([0,1,0,0])* (pi[1])
# G = np.array([0,0,1,0])* (pi[2])
# T = np.array([0,0,0,1])* (pi[3])
else:
convert_rule = np.loadtxt(convert_rule_location ,delimiter = ',',encoding = 'utf-8-sig') ###sort by A C G T
A = convert_rule[0,:]
C = convert_rule[1,:]
G = convert_rule[2,:]
T = convert_rule[3,:]
R = (A + G)/2
Y = (C + T)/2
S = (G + C)/2
W = (A + T)/2
K = (G + T)/2
M = (A + C)/2
B = (C + G + T)/3
D = (A + G + T)/3
H = (A + C + T)/3
V = (A + C + G)/3
gap = N = (A + C + G + T)/4
seq_change_matrix = []
for i in range(len(sequence)):
tmp_seq = []
for j in range(len(sequence[i])):
if sequence[i][j] == 'a':
tmp_seq.append(A)
if sequence[i][j] == 'c':
tmp_seq.append(C)
if sequence[i][j] == 'g':
tmp_seq.append(G)
if sequence[i][j] == 't':
tmp_seq.append(T)
if sequence[i][j] == '-':
tmp_seq.append(gap)
if sequence[i][j] == 'r':
tmp_seq.append(R)
if sequence[i][j] == 'y':
tmp_seq.append(Y)
if sequence[i][j] == 's':
tmp_seq.append(S)
if sequence[i][j] == 'w':
tmp_seq.append(W)
if sequence[i][j] == 'k':
tmp_seq.append(K)
if sequence[i][j] == 'm':
tmp_seq.append(M)
if sequence[i][j] == 'b':
tmp_seq.append(B)
if sequence[i][j] == 'd':
tmp_seq.append(D)
if sequence[i][j] == 'h':
tmp_seq.append(H)
if sequence[i][j] == 'v':
tmp_seq.append(V)
if sequence[i][j] == 'n':
tmp_seq.append(N)
tmp_seq = np.array(tmp_seq)
tmp_seq = tmp_seq.reshape(1,tmp_seq.shape[0]*tmp_seq.shape[1])
seq_change_matrix.append(tmp_seq[0])
seq_change_matrix = np.array(seq_change_matrix)
return seq_change_matrix,[A,C,G,T]
def PCA_improved(seq_change_matrix,PCA_components = 'max'):
from sklearn.decomposition import PCA
import numpy as np
seq_change_matrix = np.array(seq_change_matrix)
if PCA_components == 'max':
PCA_components = seq_change_matrix.shape[0]
else:
PCA_components = int(PCA_components)
pca = PCA(n_components=PCA_components)
pca.fit(seq_change_matrix)
seq_change_matrix_PCA = pca.fit_transform(seq_change_matrix)
#print ('PCA explained variance = ' + str(sum(pca.explained_variance_ratio_)))
return seq_change_matrix_PCA
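# Minimal sketch chaining the three steps above (base frequencies -> numeric
# encoding -> PCA projection); the fasta path and _demo_* name are hypothetical.
def _demo_make_database_steps(aligned_fasta_location='example_aligned.fasta'):
    pi, seq_id, sequences = calcuate_bases_frequency(aligned_fasta_location)
    seq_change_matrix, convert_rule = bases_convert(pi, sequences)
    seq_change_matrix_PCA = PCA_improved(seq_change_matrix, PCA_components='max')
    return seq_id, seq_change_matrix_PCA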
def information_clustering(seq_change_matrix_PCA,seq_id,distance_exponent = 2, clustering_method = 'single',clustering_information = '',cluster_number = 2):
####make Database
from sklearn.cluster import AgglomerativeClustering
from scipy.spatial.distance import pdist, squareform
import numpy as np
import pandas as pd
####calcuate distance matrix
if distance_exponent == 2:
distance_matrix = pdist(seq_change_matrix_PCA,'euclidean')
distance_matrix = squareform(distance_matrix)
elif distance_exponent == 1:
distance_matrix = pdist(seq_change_matrix_PCA,'cityblock')
distance_matrix = squareform(distance_matrix)
else:
distance_matrix = pdist(seq_change_matrix_PCA,'minkowski',p=distance_exponent)
distance_matrix = squareform(distance_matrix)
####
###clustering
output_id = []
output_location = []
output_identity = []
output_index = []
output_density = []
### identity = jaccard value
if clustering_information == '':
clustering = AgglomerativeClustering(n_clusters = cluster_number,affinity = 'precomputed',
linkage = clustering_method).fit(distance_matrix)
for i in range(cluster_number):
output_id.append('cluster%s' % i)
output_location.append(np.where(clustering.labels_==i))
output_identity.append(1)
output_density.append(np.max(distance_matrix[np.where(clustering.labels_==i)[0],:][:,np.where(clustering.labels_==i)[0]]))
else:
###input information
information = pd.read_csv(clustering_information, sep=',', header=None)
###information -- seq_id, clade, subclade .....
cluster_level_number = len(information.loc[0])##remove seqid
seq_id = pd.DataFrame(seq_id)
information = pd.merge(seq_id,information,on=0) ##match information
for z in range(1,cluster_level_number):
if z == 1:
cluster_information_index = []
for i in range(len(pd.value_counts(information[z]).index)):
# clustering_number_remove += 1
cluster_information_index.append(pd.value_counts(information[z]).index[i])###input information index
###Matching Identity -> Jaccard A n B/A U B
tmp_cluster_identity = [[] for i in range(len(cluster_information_index))]
tmp_cluster_location = [[] for i in range(len(cluster_information_index))]
if len(cluster_information_index)*3 > distance_matrix.shape[0]:
max_clustering_number = distance_matrix.shape[0]
else:
max_clustering_number = len(cluster_information_index)*3
for clustering_number in range(1,max_clustering_number):
clustering = AgglomerativeClustering(n_clusters = clustering_number,affinity = 'precomputed',
linkage = clustering_method).fit(distance_matrix)
for i in range(clustering_number):
for j in range(len(pd.value_counts(information[z][list(np.where(clustering.labels_ == i)[0])]).index)):
match_information_index = cluster_information_index.index(pd.value_counts(information[z][list(np.where(clustering.labels_ == i)[0])]).index[j])
tmp_cluster_map_number = pd.value_counts(information[z][list(np.where(clustering.labels_ == i)[0])])[j]
tmp_cluster_total_number = sum(pd.value_counts(information[z][list(np.where(clustering.labels_ == i)[0])]))
total_information_number = len(information[information[1] == cluster_information_index[match_information_index]])
identity = tmp_cluster_map_number / (tmp_cluster_total_number+total_information_number-tmp_cluster_map_number)
tmp_cluster_identity[match_information_index].append(identity)
tmp_cluster_location[match_information_index].append(list(np.where(clustering.labels_ == i)[0]))
for i in range (len(tmp_cluster_identity)):
max_identity = max(tmp_cluster_identity[i])
max_identity_index = np.where(np.array(tmp_cluster_identity[i]) == max_identity)[0][0]
output_id.append(cluster_information_index[i])
output_identity.append(max_identity)
output_location.append(tmp_cluster_location[i][max_identity_index])
output_index.append(z)
output_density.append(np.max(distance_matrix[tmp_cluster_location[i][max_identity_index],:][:,tmp_cluster_location[i][max_identity_index]]))
else:
clustering_index = z - 1
for y in range (len(np.where(np.array(output_index)== clustering_index)[0])):
##change distance matrix by output id
distance_matrix_change = distance_matrix[output_location[np.where(np.array(output_index)==clustering_index)[0][y]],:][:,output_location[np.where(np.array(output_index)==clustering_index)[0][y]]]
information_change = information[z][output_location[np.where(np.array(output_index)==clustering_index)[0][y]]]
cluster_information_index = []
for i in range(len(pd.value_counts(information_change).index)):
# clustering_number_remove += 1
cluster_information_index.append( | pd.value_counts(information_change) | pandas.value_counts |
from __future__ import division
from IPython.display import display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import seaborn as sns
from statsmodels.nonparametric.smoothers_lowess import lowess
from treeinterpreter import treeinterpreter as ti
sns.set_palette('colorblind')
blue, green, red, purple, yellow, cyan, = sns.color_palette('colorblind',6)
def plot_obs_feature_contrib(clf, contributions, features_df, labels, index,
class_index=0, num_features=None,
order_by='natural', violin=False, **kwargs):
"""Plots a single observation's feature contributions.
Inputs:
clf - A Decision Tree or Random Forest classifier object
contributions - The contributions from treeinterpreter
features_df - A Pandas DataFrame with the features
labels - A Pandas Series of the labels
index - An integer representing which observation we would like to
look at
class_index - The index of which class to look at (Default: 0)
num_features - The number of features we wish to plot. If None, then
plot all features (Default: None)
order_by - What to order the contributions by. The default ordering
is the natural one, which takes the original feature
ordering. (Options: 'natural', 'contribution')
violin - Whether to plot violin plots (Default: False)
Returns:
obs_contrib_df - A Pandas DataFrame that includes the feature values
and their contributions
"""
def _extract_contrib_array():
# If regression tree
if len(contributions.shape) == 2:
if class_index > 0:
raise ValueError('class_index cannot be positive for regression.')
contrib_array = contributions[index]
# If classification tree
elif len(contributions.shape) == 3:
if class_index >= contributions.shape[2]:
raise ValueError('class_index exceeds number of classes.')
contrib_array = contributions[index, :, class_index]
else:
raise ValueError('contributions is not the right shape.')
return contrib_array
def _plot_contrib():
"""Plot contributions for a given observation. Also plot violin
plots for all other observations if specified.
"""
if violin:
# Get contributions for the class
if len(contributions.shape) == 2:
contrib = contributions
elif len(contributions.shape) == 3:
contrib = contributions[:, :, class_index]
contrib_df = | pd.DataFrame(contrib, columns=features_df.columns) | pandas.DataFrame |
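# Minimal sketch of how plot_obs_feature_contrib is typically driven: fit a small
# forest on synthetic data, get per-observation contributions from treeinterpreter,
# then plot one observation. The synthetic frame and _demo_* name are illustrative only.
def _demo_plot_obs_feature_contrib():
    rng = np.random.RandomState(0)
    features_df = pd.DataFrame(rng.rand(200, 4), columns=['f1', 'f2', 'f3', 'f4'])
    labels = pd.Series((features_df['f1'] + features_df['f2'] > 1).astype(int))
    clf = RandomForestClassifier(n_estimators=20, random_state=0)
    clf.fit(features_df, labels)
    _, _, contributions = ti.predict(clf, features_df.values)
    return plot_obs_feature_contrib(clf, contributions, features_df, labels,
                                    index=0, class_index=1, num_features=4)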
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from tags_classifier_library.preprocess import (
clean_tag,
decontracted,
exclude_notes,
preprocess,
relabel,
)
from tags_classifier_library.utils import find_text
TEST_COLUMN_RELABEL_MAP = [
{
"columns": ["Transition Period - General", "policy_issue_types"],
"transform": lambda row: 1
if row["policy_issue_types"] == '{"EU exit"}'
else row["Transition Period - General"],
},
{
"columns": ["Covid-19"],
"transform": lambda row: 1
if any(i in find_text(row)[1] for i in ["covid", "coronavirus"])
else row["Covid-19"],
},
]
TEST_TAG_REPLACE_MAP = {
"Opportunities": "Opportunities",
"Exports - other": "Exports",
"Export": "Exports",
}
TEST_TAG_REMOVED = ["Reduced profit"]
@pytest.mark.parametrize(
"test_input,expected",
[
("they won't", "they will not"),
("they can't", "they can not"),
("coronavirus", "covid"),
("corona virus", "covid"),
("http://example", ""),
("don't", "do not"),
("we're", "we are"),
("'s", " is"),
("they'd", "they would"),
("they'll", "they will"),
("'t", " not"),
("'ve", " have"),
("'m", " am"),
],
)
def test_decontracted(test_input, expected):
result = decontracted(test_input)
assert expected == result
@pytest.mark.parametrize(
"test_input,expected",
[
("Please see email above.", None),
(
"Lorem ipsum. This is a longer note, please refer to above notes",
"Lorem ipsum. This is a longer note, please refer to above notes",
),
],
)
def test_exclude_notes(test_input, expected):
df = | pd.DataFrame(data={"sentence": [test_input]}) | pandas.DataFrame |
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
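# Minimal usage sketch: every dataset class in this module follows the same pattern --
# construct it for a split, then read the normalized arrays from .x and .y. The root
# path below is hypothetical.
def _demo_load_regression_dataset(root='/tmp/ucimlr_data'):
    train = Abalone(root, split=TRAIN, validation_size=0.2)
    return train.x.shape, train.y.shape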
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x) # Target as float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', names =["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["symboling", "normalized-losses", "make", "fuel-type", "aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", "length", "width", "height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", "fuel-system", "bore", "stroke", "compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
y_columns = ['price']  # assumed target column; the original left this list empty
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
y_columns=['pm2.5']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', header=None)  # data.txt is whitespace-separated with no header; the original referenced a 'dteday' column this dataset does not have
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, keep_default_na=False, header=None)
class CTSlices(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00206/slice_localization_data.zip'
download_unzip(url, dataset_path)
file_name = 'slice_localization_data.csv'
file_path = os.path.join(dataset_path, file_name)
df = pd.read_csv(file_path)
# No patient should be in both train and test set
df_train_valid = deepcopy(df.loc[df.patientId < 80, :]) # Pandas complains if it is a view
df_test = deepcopy(df.loc[df.patientId >= 80, :]) # - " -
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'patientId')
y_columns = ['reference']
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res = df_res.drop(columns='patientId')
self.x, self.y = xy_split(df_res, y_columns)
class ForecastingOrders(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Daily+Demand+Forecasting+Orders).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/Daily_Demand_Forecasting_Orders.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ForecastingStoreData(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Demand+Forecasting+for+a+store).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class FacebookComments(RegressionDataset):
"""
Predict the number of likes on posts from a collection of Facebook pages.
Every page has multiple posts, so the number of pages is smaller than the number of samples
in the dataset (each sample is one post).
# Note
The provided test split has a relatively large discrepancy between the
distributions of the features and targets. Training and validation splits are
also made so that the same page never appears in both splits. As a result, the feature
distributions of the training and validation splits can differ noticeably, likely
because the number of pages is small while the number of features is large.
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+Comment+Volume+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip'
download_unzip(url, dataset_path)
dataset_path = os.path.join(dataset_path, 'Dataset')
# The 5th variant has the most data
train_path = os.path.join(dataset_path, 'Training', 'Features_Variant_5.csv')
test_path = os.path.join(dataset_path, 'Testing', 'Features_TestSet.csv')
df_train_valid = pd.read_csv(train_path, header=None)
df_test = | pd.read_csv(test_path, header=None) | pandas.read_csv |
"""CoinPaprika view"""
__docformat__ = "numpy"
import argparse
import difflib
from typing import List
import pandas as pd
from tabulate import tabulate
from gamestonk_terminal.helper_funcs import parse_known_args_and_warn, check_positive
import gamestonk_terminal.cryptocurrency.discovery.coinpaprika_model as paprika
from gamestonk_terminal.cryptocurrency.overview.coinpaprika_model import (
get_list_of_coins,
)
def search(other_args: List[str]):
"""Search in CoinPaprika
Parameters
----------
other_args: List[str]
Arguments to pass to argparse
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="search",
description="""Search over CoinPaprika API
You can display only top N number of results with --top parameter.
You can sort data by id, name , category --sort parameter and also with --descend flag to sort descending.
To choose category in which you are searching for use --cat/-c parameter. Available categories:
currencies|exchanges|icos|people|tags|all
Displays:
id, name, category""",
)
parser.add_argument(
"-q",
"--query",
help="phrase for search",
dest="query",
type=str,
required="-h" not in other_args,
)
parser.add_argument(
"-c",
"--cat",
help="Categories to search: currencies|exchanges|icos|people|tags|all. Default: all",
dest="category",
default="all",
type=str,
choices=[
"currencies",
"exchanges",
"icos",
"people",
"tags",
"all",
],
)
parser.add_argument(
"-t",
"--top",
default=20,
dest="top",
help="Limit of records",
type=check_positive,
)
parser.add_argument(
"-s",
"--sort",
dest="sortby",
type=str,
help="Sort by given column. Default: id",
default="id",
choices=["category", "id", "name"],
)
parser.add_argument(
"--descend",
action="store_false",
help="Flag to sort in descending order (lowest first)",
dest="descend",
default=True,
)
try:
if other_args:
if not other_args[0][0] == "-":
other_args.insert(0, "-q")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
category = ns_parser.category
if category.lower() == "all":
category = "currencies,exchanges,icos,people,tags"
df = paprika.search(query=ns_parser.query, category=category)
if df.empty:
print(
f"No results for search query '{ns_parser.query}' in category '{ns_parser.category}'\n"
)
return
df = df.sort_values(by=ns_parser.sortby, ascending=ns_parser.descend)
print(
tabulate(
df.head(ns_parser.top),
headers=df.columns,
floatfmt=".1f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except Exception as e:
print(e, "\n")
def coins(other_args: List[str]):
"""Shows list of all available coins on CoinPaprika
Parameters
----------
other_args: List[str]
Arguments to pass to argparse
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="coins",
description="""Shows list of all available coins on CoinPaprika.
You can display top N number of coins with --top N flag,
You can search by starting letters with -l/--letter flag like `coins -l M`
And you can also specify by which column you are searching for coin with --key
Displays columns like:
rank, id, name, type""",
)
parser.add_argument(
"-s",
"--skip",
default=0,
dest="skip",
help="Skip n of records",
type=check_positive,
)
parser.add_argument(
"-t",
"--top",
default=15,
dest="top",
help="Limit of records",
type=check_positive,
)
parser.add_argument("-l", "--letter", dest="letter", help="First letters", type=str)
parser.add_argument(
"-k",
"--key",
dest="key",
help="Search in column symbol, name, id",
type=str,
choices=["id", "symbol", "name"],
default="symbol",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = get_list_of_coins()
letter = ns_parser.letter
if letter and isinstance(letter, str):
df = df[
df[ns_parser.key].str.match(f"^({letter.lower()}|{letter.upper()})")
].copy()
try:
df = df[ns_parser.skip : ns_parser.skip + ns_parser.top]
except Exception as e:
print(e)
print(
tabulate(
df,
headers=df.columns,
floatfmt=".1f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except Exception as e:
print(e, "\n")
def find(other_args: List[str]):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="find",
description="""
Find coins similar to a given name, symbol or id. If you don't remember the exact name or id of a coin on CoinPaprika,
you can use this command to display coins whose name, symbol or id is similar to your search query.
Example of usage: the coin name is something like "kusama", so: find -c kusama -k name -t 25
It will search for coins with names similar to "kusama" and display the top 25 matches.
-c, --coin your search query (coin name, symbol or id)
-k, --key the column to search by: symbol, id or name
-t, --top the number of records to display.
""",
)
parser.add_argument(
"-c",
"--coin",
help="Coin name or id, or symbol",
dest="coin",
required="-h" not in other_args,
type=str,
)
parser.add_argument(
"-k",
"--key",
dest="key",
help="Specify by which column you would like to search: symbol, name, id",
type=str,
choices=["id", "symbol", "name"],
default="name",
)
parser.add_argument(
"-t",
"--top",
default=10,
dest="top",
help="Limit of records",
type=check_positive,
)
try:
if other_args:
if not other_args[0][0] == "-":
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
if not ns_parser.coin or ns_parser.coin is None:
print(
"You didn't provide coin. Please use param -c/--coin <coin name>", "\n"
)
return
coins_df = get_list_of_coins()
coins_list = coins_df[ns_parser.key].to_list()
keys = {"name": "title", "symbol": "upper", "id": "lower"}
key = keys.get(ns_parser.key)
coin = getattr(ns_parser.coin, str(key))()
sim = difflib.get_close_matches(coin, coins_list, ns_parser.top)
df = | pd.Series(sim) | pandas.Series |
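# Minimal usage sketch: these view functions parse argv-style token lists, so callers
# invoke them with raw string arguments; the queries below are illustrative only and
# require network access to the CoinPaprika API.
def _demo_coinpaprika_views():
    search(["-q", "bitcoin", "-c", "currencies", "-t", "5"])
    coins(["-l", "B", "-k", "symbol", "-t", "10"])
    find(["-c", "kusama", "-k", "name", "-t", "5"])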
#!/usr/bin/python3
import datetime
from libutils.network import RandomHeader
from libutils.log import Log, method
from libutils.utils import trans
import pandas as pd
import re
import json
import numpy as np
import requests
from lxml import etree
from dev_global.env import TIME_FMT
from libmysql_utils.mysql8 import (mysqlBase, mysqlHeader, Json2Sql)
from pandas import DataFrame
from requests.models import HTTPError
from libmysql_utils.orm.form import formStockManager
from libbasemodel.cninfo import cninfoSpider
from sqlalchemy import exc
"""
Public method:
> class StockBase
> class StockCodeList
> class StockList
"""
class eastmoneySpider(object):
def __init__(self) -> None:
self.http_header = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
# "Accept-Encoding": "gzip, deflate",
# "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
# "Cache-Control": "max-age=0",
# "Connection": "keep-alive",
# "Cookie": "intellpositionL=583.188px; qgqp_b_id=c296d37f90f4c939cacc16224744f41a; em_hq_fls=js; intellpositionT=1091px; waptgshowtime=202139; HAList=a-sz-300059-%u4E1C%u65B9%u8D22%u5BCC%2Ca-sh-603776-%u6C38%u5B89%u884C%2Ca-sz-300227-%u5149%u97F5%u8FBE%2Ca-sh-603929-%u4E9A%u7FD4%u96C6%u6210%2Ca-sz-300604-%u957F%u5DDD%u79D1%u6280%2Ca-sz-002245-%u6FB3%u6D0B%u987A%u660C; st_si=80758845330570; st_asi=delete; st_pvi=99506938076002; st_sp=2020-07-19%2022%3A28%3A41; st_inirUrl=https%3A%2F%2Fwww.eastmoney.com%2F; st_sn=15; st_psi=20210309233831816-111000300841-6472437881",
# "Host": "quote.eastmoney.com",
# "If-Modified-Since": "Wed, 10 Mar 2021 00:42:17 GMT",
# "If-None-Match": 'W/"e87c88912663d51:0"',
# "Referer": "https://www.baidu.com/s?wd=%E7%BE%8E%E8%82%A1%E5%88%97%E8%A1%A8",
# "Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36"
}
class StockBase(mysqlBase):
"""
param header: mysqlHeader\n
API:\n
"""
def __init__(self, header: mysqlHeader) -> None:
# if not isinstance(header, mysqlHeader):
# raise HeaderException("Error due to incorrect header.")
super(StockBase, self).__init__(header)
# date format: YYYY-mm-dd
self._Today = datetime.date.today().strftime(TIME_FMT)
# date format: YYYYmmdd
self._today = datetime.date.today().strftime('%Y%m%d')
# self.TAB_STOCK_MANAGER = "stock_manager"
self.j2sql = Json2Sql(header)
self._Header = RandomHeader()
@property
def httpHeader(self) -> dict:
return self._Header()
@property
def Today(self) -> str:
"""
Format: 1983-01-22
"""
self._Today = datetime.date.today().strftime(TIME_FMT)
return self._Today
@property
def today(self) -> str:
"""
Format: 19830122
"""
self._today = datetime.date.today().strftime('%Y%m%d')
return self._today
def get_all_stock_list(self) -> list:
"""
Query stock code from database.
Return stock code --> list.
"""
query_stock_code = self.session.query(formStockManager.stock_code).filter_by(flag='t').all()
df = pd.DataFrame.from_dict(query_stock_code)
df.columns = ['stock_code']
stock_list = df['stock_code'].tolist()
# should test if stock list is null
return stock_list
def get_all_index_list(self) -> list:
"""
Return stock code --> list.
"""
query_stock_code = self.session.query(formStockManager.stock_code).filter_by(flag='i').all()
df = pd.DataFrame.from_dict(query_stock_code)
df.columns = ['stock_code']
stock_list = df['stock_code'].tolist()
return stock_list
def get_all_security_list(self) -> list:
"""
Return stock code --> list
"""
# Return all kinds of securities in form stock list.
# Result : List type data.
query_stock_code = self.session.query(formStockManager.stock_code).all()
df = pd.DataFrame.from_dict(query_stock_code)
df.columns = ['stock_code']
stock_list = df['stock_code'].tolist()
return stock_list
@staticmethod
def get_html_object(url: str, HttpHeader: dict) -> etree.HTML:
"""
result is a etree.HTML object
"""
response = requests.get(url, headers=HttpHeader, timeout=3)
if response.status_code == 200:
# setting encoding
response.encoding = response.apparent_encoding
html = etree.HTML(response.text)
elif response.status_code == 304:
html = None
else:
html = None
raise HTTPError(f"Status code: {response.status_code} for {url}")
return html
@staticmethod
def get_excel_object(url: str) -> DataFrame:
df = pd.read_excel(url)
return df
@staticmethod
def set_date_as_index(df: DataFrame) -> DataFrame:
"""
Input must be DataFrame type and must have a column named 'date'.\n
This is an INPLACE operation.
"""
df['date'] = pd.to_datetime(df['date'], format=TIME_FMT)
df.set_index('date', inplace=True)
# exception 1, date index not exists.
# exception 2, date data is not the date format.
return df
@staticmethod
def dataframe_data_translate(df: DataFrame) -> DataFrame:
"""
Translate data format in dataframe to correct type.
"""
for index in df.columns:
try:
if re.search('date', index):
df[index] = pd.to_datetime(df[index])
elif re.search('int', index):
df[index] = pd.to_numeric(df[index])
df[index].replace(np.nan, 0, inplace=True)
elif re.search('float', index):
df[index] = pd.to_numeric(df[index])
df[index].replace(np.nan, 0.0, inplace=True)
elif re.search('char', index):
df[index].replace(np.nan, 'NULL', inplace=True)
except Exception:
pass
return df
def get_close_price(self, stock_code: str) -> DataFrame:
df = self.select_values(stock_code, 'trade_date,close_price')
if not df.empty:
df.columns = ['trade_date', 'close_price']
df.set_index('trade_date', inplace=True)
return df
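# Minimal sketch of dataframe_data_translate: the conversion rule is chosen by
# substring of the column name ('date', 'int', 'float', 'char'), so the synthetic
# frame below exercises every branch without touching the database.
def _demo_dataframe_data_translate():
    raw = DataFrame({
        'trade_date': ['2021-03-09', '2021-03-10'],
        'int_volume': ['100', np.nan],
        'float_close': ['10.5', np.nan],
        'char_name': [np.nan, 'demo'],
    })
    return StockBase.dataframe_data_translate(raw)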
class HeaderException(BaseException):
pass
class StockCodeList(object):
"""
This is a base method use for generate stock code list.\n
API:\n
@: get_stock()\n
@: get_index()
"""
@staticmethod
def _get_sh_stock():
stock_list = [f"SH60{str(i).zfill(4)}" for i in range(4000)]
return stock_list
@staticmethod
def _get_sz_stock():
stock_list = [f"SZ{str(i).zfill(6)}" for i in range(1, 1000)]
return stock_list
@staticmethod
def _get_cyb_stock():
stock_list = [f"SZ300{str(i).zfill(3)}" for i in range(1, 1000)]
return stock_list
@staticmethod
def _get_zxb_stock():
stock_list = [f"SZ002{str(i).zfill(3)}" for i in range(1, 1000)]
return stock_list
@staticmethod
def _get_b_stock():
s1 = [f"SH900{str(i).zfill(3)}" for i in range(1, 1000)]
s2 = [f"SZ200{str(i).zfill(3)}" for i in range(1, 1000)]
stock_list = s1 + s2
return stock_list
@staticmethod
def _get_index():
index1 = [f"SH000{str(i).zfill(3)}" for i in range(1000)]
index2 = [f"SH950{str(i).zfill(3)}" for i in range(1000)]
index3 = [f"SZ399{str(i).zfill(3)}" for i in range(1000)]
stock_list = index1 + index2 + index3
return stock_list
@staticmethod
def _get_kcb_stock():
stock_list = [f"SH688{str(i).zfill(3)}" for i in range(1000)]
return stock_list
@staticmethod
def _get_xsb_stock():
stock_list = [f"SH83{str(i).zfill(3)}" for i in range(1000)]
return stock_list
@staticmethod
def get_stock():
"""
@API function
"""
stock_list = StockCodeList._get_sh_stock()
stock_list += StockCodeList._get_sz_stock()
stock_list += StockCodeList._get_cyb_stock()
stock_list += StockCodeList._get_zxb_stock()
stock_list += StockCodeList._get_kcb_stock()
stock_list += StockCodeList._get_b_stock()
stock_list += StockCodeList._get_index()
return stock_list
@staticmethod
def get_index():
"""
@API function
"""
stock_list = StockCodeList._get_index()
return stock_list
@staticmethod
def _get_fund():
pass
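# Minimal usage sketch: the candidate universe is generated purely from exchange
# prefixes plus zero-padded numeric codes, so no network access is needed.
def _demo_stock_code_universe():
    all_codes = StockCodeList.get_stock()
    index_codes = StockCodeList.get_index()
    return len(all_codes), len(index_codes), all_codes[:3]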
class StockList(cninfoSpider, StockBase):
"""
Fetch security code lists by crawling the cninfo website.
API: \n
> get_stock() : all stock codes\n
> get_stock_list : all stock codes, fetched from the SSE website.\n
> get_hk_stock_list : all Hong Kong stock codes.\n
> get_fund_stock_list : fund codes.\n
> get_bond_stock_list : bond codes.\n
"""
def __init__(self, header: mysqlHeader) -> None:
cninfoSpider.__init__(self)
StockBase.__init__(self, header)
self.j2sql = Json2Sql(header)
self.j2sql.load_table('stock_manager')
def _get_stock_list_data(self, url: str) -> DataFrame:
"""
Get stock list from url: 'http://www.cninfo.com.cn/new/data/szse_stock.json'
"""
# result -> http.response
result = requests.get(url, self.http_header)
# jr -> json like data
jr = json.loads(result.text)
df = | pd.DataFrame(jr['stockList']) | pandas.DataFrame |
import unittest
from pydre import project
from pydre import core
from pydre import filters
from pydre import metrics
import os
import glob
import contextlib
import io
from tests.sample_pydre import project as samplePD
from tests.sample_pydre import core as c
import pandas
import numpy as np
from datetime import timedelta
import logging
import sys
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
# Test cases of following functions are not included:
# Reason: unmaintained
# in common.py:
# tbiReaction()
# tailgatingTime() & tailgatingPercentage()
# ecoCar()
# gazeNHTSA()
#
# Reason: incomplete
# in common.py:
# findFirstTimeOutside()
# brakeJerk()
class TestPydre(unittest.TestCase):
ac_diff = 0.000001
# the acceptable difference between expected & actual results when testing scipy functions
def setUp(self):
        # attributes set on self here are available in every test method; setUp runs before each test
self.projectlist = ["honda.json"]
self.datalist = ["Speedbump_Sub_8_Drive_1.dat", "ColTest_Sub_10_Drive_1.dat"]
self.zero = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
funcName = ' [ ' + self._testMethodName + ' ] ' # the name of test function that will be executed right after this setUp()
print(' ')
print (funcName.center(80,'#'))
print(' ')
def tearDown(self):
print(' ')
print('[ END ]'.center(80, '#'))
print(' ')
# ----- Helper Methods -----
def projectfileselect(self, index: int):
projectfile = self.projectlist[index]
fullpath = os.path.join("tests/test_projectfiles/", projectfile)
return fullpath
def datafileselect(self, index: int):
datafile = self.datalist[index]
fullpath = glob.glob(os.path.join(os.getcwd(), "tests/test_datfiles/", datafile))
return fullpath
def secs_to_timedelta(self, secs):
return timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=secs)
def compare_cols(self, result_df, expected_df, cols):
result = True
for names in cols:
result = result and result_df[names].equals(expected_df[names])
if not result:
print(names)
print(result_df[names])
print("===")
print(expected_df[names])
return False
return result
# convert a drivedata object to a str
def dd_to_str(self, drivedata: core.DriveData):
output = ""
output += str(drivedata.PartID)
output += str(drivedata.DriveID)
output += str(drivedata.roi)
output += str(drivedata.data)
output += str(drivedata.sourcefilename)
return output
# ----- Test Cases -----
def test_datafile_exist(self):
datafiles = self.datafileselect(0)
self.assertFalse(0 == len(datafiles))
for f in datafiles:
self.assertTrue(os.path.isfile(f))
def test_reftest(self):
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
results = p.run(self.datafileselect(0))
results.Subject.astype('int64')
sample_p = samplePD.Project(desiredproj)
expected_results = (sample_p.run(self.datafileselect(0)))
self.assertTrue(self.compare_cols(results, expected_results, ['ROI', 'getTaskNum']))
def test_columnMatchException_excode(self):
f = io.StringIO()
with self.assertRaises(SystemExit) as cm:
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
result = p.run(self.datafileselect(1))
self.assertEqual(cm.exception.code, 1)
def test_columnMatchException_massage(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184]}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
handler = logging.FileHandler(filename='tests\\temp.log')
filters.logger.addHandler(handler)
with self.assertRaises(core.ColumnsMatchError):
result = filters.smoothGazeData(data_object)
expected_console_output = "Can't find needed columns {'FILTERED_GAZE_OBJ_NAME'} in data file ['test_file3.csv'] | function: smoothGazeData"
temp_log = open('tests\\temp.log')
msg_list = temp_log.readlines()
msg = ' '.join(msg_list)
filters.logger.removeHandler(handler)
#self.assertIn(expected_console_output, msg)
    # Isolated test case: the sliceByTime function no longer exists in pydre.core
def test_core_sliceByTime_1(self):
d = {'col1': [1, 2, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 3, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1 7\n1 2 8\n2 3 9"
self.assertEqual(result, expected_result)
    # Isolated test case: the sliceByTime function no longer exists in pydre.core
def test_core_sliceByTime_2(self):
d = {'col1': [1, 1.1, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 2, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1.0 7\n1 1.1 8"
self.assertEqual(result, expected_result)
def test_core_mergeBySpace(self):
d1 = {'SimTime': [1, 2], 'XPos': [1, 3], 'YPos': [4, 3]}
df1 = pandas.DataFrame(data=d1)
d2 = {'SimTime': [3, 4], 'XPos': [10, 12], 'YPos': [15, 16]}
df2 = pandas.DataFrame(data=d2)
data_object1 = core.DriveData.initV2(PartID=0,DriveID=1, data=df1, sourcefilename="test_file.csv")
data_object2 = core.DriveData.initV2(PartID=0, DriveID=2, data=df2, sourcefilename="test_file.csv")
param = []
param.append(data_object1)
param.append(data_object2)
result = self.dd_to_str(core.mergeBySpace(param))
expected_result = "01None SimTime XPos YPos\n0 1 1 4\n1 2 3 3\n0 2 10 15\n1 3 12 16test_file.csv"
self.assertEqual(result, expected_result)
def test_filter_numberSwitchBlocks_1(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
print(result.data)
print(expected_result.data)
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_2(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'taskblocks': [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_3(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 1.0, 1.0, 1.0, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_smoothGazeData_1(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'FILTERED_GAZE_OBJ_NAME': ['localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen']}
        # the function should recognize this invalid input and return None after printing
# "Bad gaze data, not enough variety. Aborting"
print("expected console output: Bad gaze data, not enough variety. Aborting")
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object)
#print(result.to_string())
self.assertEqual(None, result)
def test_filter_smoothGazeData_2(self):
d3 = {'DatTime': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object, latencyShift=0)
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane'],
'gaze': ["offroad", "offroad", "offroad", "offroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad",
"onroad", "onroad", "onroad", "onroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad"],
'gazenum': np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
        self.assertTrue(expected_result_df.equals(result.data))
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_filter_smoothGazeData_3(self):
# --- Construct input ---
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
gaze_col = ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']
d3 = {'DatTime': dat_time_col, 'FILTERED_GAZE_OBJ_NAME': gaze_col}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# ----------------------
result = filters.smoothGazeData(data_object, latencyShift=0)
print(result.data)
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': gaze_col,
'gaze': ["offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad"],
'gazenum': np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
        self.assertTrue(expected_result_df.equals(result.data))
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_metrics_findFirstTimeAboveVel_1(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [-0.000051, -0.000051, -0.000041, -0.000066, -0.000111, -0.000158, -0.000194, -0.000207, 0.000016, 0.000107, 0.000198]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_2(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_3(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_4(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeOutside_1(self):
pass
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
#result = metrics.common.findFirstTimeOutside(data_object)
#expected_result = 0
#self.assertEqual(expected_result, result)
#err: NameError: name 'pos' is not defined --------------------------------------------------------!!!!!!!!!
def test_metrics_colMean_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position')
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_colMean_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = 6.5
self.assertEqual(expected_result, result)
def test_metrics_colMean_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = np.nan
#self.assertEqual(expected_result, result)
np.testing.assert_equal(expected_result, result)
def test_metrics_colSD_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 3.1622776601683795
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position', 3)
expected_result = 2.29128784747792
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 0
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colMax_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 10
self.assertEqual(expected_result, result)
def test_metrics_colMax_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 9
self.assertEqual(expected_result, result)
def test_metrics_colMax_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_colMin_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMin(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_colMin_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMin(data_object, 'position')
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_timeAboveSpeed_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.timeAboveSpeed(data_object, 0, True)
expected_result = 1.002994011976048
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_timeAboveSpeed_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData(data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.timeAboveSpeed(data_object, 0, False)
expected_result = 0.1675
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_timeAboveSpeed_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.timeAboveSpeed(data_object, 0, False)
expected_result = 0.0
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_roadExits_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'RoadOffset': [1.7679, 1.7679, 1.5551, 1.5551, 1.5551, 1.667174, 1.667174, 1.668028, 1.668028, 1.668028, 1.786122],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExits(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_roadExits_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'RoadOffset': [7.3, 7.4, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2],
'Velocity': [0, 15.1, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExits(data_object)
expected_result = 0.034
self.assertEqual(expected_result, result)
def test_metrics_roadExits_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'RoadOffset': [-1, -1, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2],
'Velocity': [15.1, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExits(data_object)
expected_result = 0.034
self.assertEqual(expected_result, result)
def test_metrics_roadExitsY_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'YPos': [1.7679, 1.7679, 1.5551, 1.5551, 1.5551, 1.667174, 1.667174, 1.668028, 1.668028, 1.668028, 1.786122],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]}
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.roadExitsY(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_roadExitsY_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'YPos': [7.3, 7.4, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2, 7.2],
'Velocity': [0, 15.1, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3]}
        df = pandas.DataFrame(data=d)
import pandas as pd
import numpy as np
import sys
sys.path.append('./')
from data_layer.phoible import PhoibleInfo
from data_layer.parse import read_src_data, get_languages, separate_train, separate_per_language
from util import argparser
def get_symbols(df, field='IPA'):
symbols = set([])
for i, (index, x) in enumerate(df.iterrows()):
symbols |= set([y for y in x[field].split(' ')])
return symbols
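# e.g. get_symbols(lang_df) returns the set of space-separated IPA symbols seen in
# the given dataframe ('lang_df' is any frame with an 'IPA' column).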
def get_lang_len(df, field='IPA'):
lens = []
for i, (index, x) in enumerate(df.iterrows()):
word = x[field].split(' ')
lens += [len(word)]
return np.mean(lens)
def get_lang_ipa_info(df, languages_df, args, field='IPA'):
phoible = PhoibleInfo()
lang_data = []
for lang, lang_df in languages_df.items():
frames = [lang_df['train'], lang_df['val'], lang_df['test']]
        full_data = pd.concat(frames)
import numpy as np
from sklearn import datasets
import pandas as pd
import matplotlib.pyplot as plt
def Decision_Surface(data, target, model, surface=True, cell_size=.01):
# Get bounds
x_min, x_max = data[data.columns[0]].min(), data[data.columns[0]].max()
y_min, y_max = data[data.columns[1]].min(), data[data.columns[1]].max()
# Create a mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, cell_size), np.arange(y_min, y_max, cell_size))
meshed_data = pd.DataFrame(np.c_[xx.ravel(), yy.ravel()])
# Add interactions
for i in range(data.shape[1]):
if i <= 1:
continue
meshed_data = np.c_[meshed_data, np.power(xx.ravel(), i)]
# Predict on the mesh
Z = model.predict(meshed_data).reshape(xx.shape)
# Plot mesh and data
if data.shape[1] > 2:
plt.title("humor^(" + str(range(1,complexity+1)) + ") and number_pets")
else:
plt.title("humor and number_pets")
plt.xlabel("humor")
plt.ylabel("number_pets")
if surface:
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.cool, alpha=0.3)
    color = ["blue" if t == 0 else "red" for t in target]
    # scatter the raw observations on top of the surface, colored by class
    plt.scatter(data[data.columns[0]], data[data.columns[1]], c=color)
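# Hedged usage sketch (assumes a fitted sklearn classifier `clf` and a two-column
# feature frame; both names are placeholders, not defined in this file):
#   Decision_Surface(data[["humor", "number_pets"]], target, clf)
#   plt.show()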
def create_data():
# Set the randomness
np.random.seed(36)
# Number of users
n_users = 300
# Relationships
variable_names = ["humor", "number_pets", "age", "favorite_number"]
target_name = "success"
# Generate data
predictors, target = datasets.make_classification(n_features=4, n_redundant=0,
n_informative=2, n_clusters_per_class=2,
n_samples=n_users)
    data = pd.DataFrame(predictors, columns=variable_names)
# Omid55
# Start date: 26 Sept 2019
# Modified date: 2 Nov 2019
# Author: <NAME>, <NAME>
# Email: <EMAIL>, <EMAIL>
#
# Module for processing the POGS Jeopardy-like log data.
# Example:
#
from __future__ import division, print_function, absolute_import, unicode_literals
import attr
import json
import re
from enum import Enum, unique
import numpy as np
import pandas as pd
from typing import Text
import copy
class Error(Exception):
"""Base Error class for this module."""
class NotFoundFileError(Error):
"""Error class when a file is not found."""
class EventLogsNotLoadedError(Error):
"""Error class when event logs haven't been loaded in the constructor."""
@unique
class Category(Enum):
SCIENCE_AND_TECHNOLOGY = 1
HISTORY_AND_MYTHOLOGY = 2
LITERATURE_AND_MEDIA = 3
@staticmethod
def parse(category_str: Text) -> 'Category':
str2category_map = {
'Science and Technology': Category.SCIENCE_AND_TECHNOLOGY,
'History and Mythology': Category.HISTORY_AND_MYTHOLOGY,
'Literature and Media': Category.LITERATURE_AND_MEDIA}
if category_str not in str2category_map:
raise ValueError(
'The input category was not found. It was entered: {}'.format(
category_str))
return str2category_map[category_str]
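# e.g. Category.parse('History and Mythology') returns Category.HISTORY_AND_MYTHOLOGY;
# any unknown label raises ValueError.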
@unique
class Level(Enum):
EASY = 1
MEDIUM = 2
HARD = 3
@staticmethod
def parse(level_str: Text) -> 'Level':
str2level_map = {
'Easy': Level.EASY,
'Medium': Level.MEDIUM,
'Hard': Level.HARD}
if level_str not in str2level_map:
raise ValueError(
'The input level was not found. It was entered: {}'.format(
level_str))
return str2level_map[level_str]
@attr.s
class JeopardyQuestion(object):
"""Jeopardy-like question including category, choices, right answer, etc."""
id = attr.ib(type=int)
question_content = attr.ib(type=str)
answer = attr.ib(type=str)
choices = attr.ib(factory=list)
category = attr.ib(factory=Category)
level = attr.ib(factory=Level)
@attr.s
class JeopardyInfoOptions(object):
"""Options about the executed game."""
num_of_questions = attr.ib(default=45)
num_of_agents = attr.ib(default=4)
num_of_team_members = attr.ib(default=4)
num_of_influence_reports = attr.ib(default=9)
correct_points = attr.ib(default=4)
wrong_points = attr.ib(default=-1)
using_agent_points = attr.ib(default=-1)
questions = attr.ib(factory=list)
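# e.g. JeopardyInfoOptions(num_of_questions=45, correct_points=4, wrong_points=-1)
# simply restates the defaults above; pass different values to model another game setup.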
@attr.s
class MachineInfo():
"""Info about a machine's usage per question"""
used = attr.ib(default=False)
probability = attr.ib(default=-1)
user = attr.ib(default=-1)
answer_given = attr.ib(default="")
class TeamLogProcessor(object):
"""Processes the logs of one team who played POGS Jeopardy game.
Usage:
loader = TeamLogProcessor(
team_id=1,
logs_directory_path='/home/omid/Datasets/Jeopardy',
game_info=JeopardyInfoOptions())
Properties:
team_id: The id of the existing team in this object.
game_info: Jeopardy-like game information.
team_event_logs: All event logs for this team.
messages: The communication messages among members.
members: List of members in the correct order.
"""
def __init__(self,
team_id: int,
logs_directory_path: Text,
game_info: JeopardyInfoOptions = JeopardyInfoOptions()):
self.team_id = team_id
self.game_info = game_info
self._load_all_files(logs_directory_path)
def _load_all_files(self, logs_directory_path: Text) -> None:
"""Load all files pipeline.
This function only calls all other unit tested functions and does not
need testing.
"""
self.logs_directory_path = logs_directory_path
self._load_game_questions(
file_path=logs_directory_path + '/jeopardy.json')
self._load_this_team_event_logs(
logs_file_path=logs_directory_path + '/event_log.csv',
team_has_subject_file_path=logs_directory_path
+ '/team_has_subject.csv')
self._load_messages()
self._load_answers_chosen()
self._load_machine_usage_info()
self._preload_data(logs_directory_path)
self._define_team_member_order(logs_directory_path)
self._load_ratings()
self._load_accumulated_score()
self._load_survey()
# self._old_load_all(logs_directory_path, self.team_id) ## DELETE.
def _load_this_team_event_logs(self,
logs_file_path:Text,
team_has_subject_file_path: Text) -> None:
event_log = pd.read_csv(
logs_file_path,
sep=',',
quotechar='|',
names=['id', 'event_type', 'event_content', 'timestamp',
'completed_task_id', 'sender_subject_id',
'receiver_subject_id', 'session_id', 'sender', 'receiver',
'extra_data'])
team_subjects = pd.read_csv(
team_has_subject_file_path,
sep=',',
quotechar='|',
names=['id', 'team_id', 'sender_subject_id']).drop('id', 1)
event_log['sender_subject_id'] = pd.to_numeric(
event_log['sender_subject_id'])
event_log_with_team = pd.merge(
event_log, team_subjects, on='sender_subject_id', how='left')
self.team_event_logs = event_log_with_team[
(event_log_with_team['team_id'] == self.team_id)]
if len(self.team_event_logs) == 0:
raise EventLogsNotLoadedError(
'Logs for team_id={} was not found.'.format(self.team_id))
def _load_game_questions(self, file_path: Text) -> None:
"""Loads every question, choices and the right answer.
"""
self.game_info.questions = {}
with open(file_path, 'r') as f:
question_list = json.load(f)
self.game_info.num_of_questions = len(question_list)
for question in question_list:
self.game_info.questions[question['ID']] = JeopardyQuestion(
id=question['ID'],
question_content=question['question'],
answer=question['Answer'],
choices=question['value'],
category=Category.parse(question['Category']),
level=Level.parse(question['Level']))
def _load_messages(self) -> None:
"""Loads the communication messages for the current team.
Args:
None.
Returns:
None.
Raises:
EventLogsNotLoadedError: If the constructor has not been loaded the
event logs data yet.
"""
if len(self.team_event_logs) == 0:
raise EventLogsNotLoadedError(
'Please first run constructor of TeamLogProcessor.')
indices = [0] + list(
np.where(self.team_event_logs.extra_data == 'SubmitButtonField')[0])
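        # 'SubmitButtonField' events mark the submission that closes a question round;
        # consecutive index pairs therefore bound one round's slice of the event log.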
begin_index = 0
end_index = 1
def extract_message(message_content):
return message_content.split('"message":"')[1].split('"')[0]
self.messages = []
while end_index < len(indices):
if indices[end_index] - indices[begin_index] > 4:
df = self.team_event_logs.iloc[
indices[begin_index] + 1: indices[end_index]]
df = df[df.event_type == 'COMMUNICATION_MESSAGE']
df.event_content = df.event_content.apply(extract_message)
df = df[df.event_content != '']
self.messages.append(df)
begin_index = end_index
end_index += 1
def _get_last_individual_answers(self, individual_answers_chosen):
last_answers = {}
for index, row in individual_answers_chosen.iterrows():
answer = row["event_content"].split(',')[0].replace('"', '')
last_answers[row["sender_subject_id"]] = answer
return last_answers
def _get_last_group_answers(self, group_answers_chosen, last_answers):
for index, row in group_answers_chosen.iterrows():
answer = row["event_content"].split(',')[0].replace('"', '')
last_answers[row["sender_subject_id"]] = answer
return last_answers
def _load_answers_chosen(self) -> None:
"""Loads the choices of each person for their initial and final answer"""
indices = [0] + list(
np.where(self.team_event_logs.extra_data == 'SubmitButtonField')[0])
if len(indices) == 1:
raise EventLogsNotLoadedError(
'No answer were found for team {}.'.format(self.team_id))
begin_index = 0
end_index = 1
def extract_answer_and_question(event_content):
event_content_string = event_content[1:-1]
event_content_array = event_content_string.split('||')
answer_info = event_content_array[0].split(':')
answer = answer_info[1]
question_info = event_content_array[1].split(':')
question_number = question_info[1]
return answer + "," + question_number
individual_answers_chosen_list = []
group_answers_chosen = []
self.question_order = []
while end_index < len(indices):
if indices[end_index] - indices[begin_index] > 4:
df = self.team_event_logs.iloc[
indices[begin_index] + 1: indices[end_index]]
df = df[df.extra_data == 'IndividualResponse']
df.event_content = df.event_content.apply(
extract_answer_and_question)
individual_answers_chosen_list.append(df)
df = self.team_event_logs.iloc[
indices[begin_index] + 1: indices[end_index]]
df = df[df.extra_data == 'GroupRadioResponse']
df.event_content = df.event_content.apply(extract_answer_and_question)
df = df[df.event_content != '']
group_answers_chosen.append(df)
begin_index = end_index
end_index += 1
self.individual_answers_chosen = {}
self.group_answers_chosen = {}
# self._set_team_members(individual_answers_chosen_list[0])
for index in range(len(individual_answers_chosen_list)):
event_content = str(individual_answers_chosen_list[index].event_content)
question_number = int(float(event_content.split("\n")[0].split(",")[1]))
self.question_order.append(question_number)
last_answers = self._get_last_individual_answers(individual_answers_chosen_list[index])
self.individual_answers_chosen[question_number] = last_answers
last_group_answers = copy.deepcopy(last_answers)
last_group_answers = self._get_last_group_answers(group_answers_chosen[index], last_group_answers)
self.group_answers_chosen[question_number] = last_group_answers
def _load_machine_usage_info(self) -> None:
"""Loads whether the machine was used for every question."""
indices = [0] + list(
np.where(self.team_event_logs.extra_data == 'SubmitButtonField')[0])
begin_index = 0
end_index = 1
def extract_machine_info(event_content):
event_content_string = event_content[1:-1]
event_content_array = event_content_string.split('||')
machine_info = event_content_array[0].replace('"', '').split(':')
answer = machine_info[2].split("_")[0]
probability = float(machine_info[2].split("_")[1])
question_info = event_content_array[1].split(':')
question_number = int(float(question_info[1]))
return probability, answer, question_number
def get_info_as_list(event_content):
event_content = event_content[event_content.find("(") + 1:-1]
event_content_list = event_content.split(",")
probability = float(event_content_list[0])
answer = event_content_list[1].strip()
question_number = int(event_content_list[2])
return [probability, answer, question_number]
def extract_question_number(event_content):
event_content_string = event_content[1:-1]
event_content_array = event_content_string.split('||')
question_info = event_content_array[1].split(':')
question_number = question_info[1]
return question_number
self.machine_usage_info = {}
while end_index < len(indices):
if indices[end_index] - indices[begin_index] > 4:
df = self.team_event_logs.iloc[
indices[begin_index] + 1: indices[end_index]]
df = df[df.extra_data == 'AskedMachine']
if (not df.empty):
df.event_content = df.event_content.apply(extract_machine_info)
info = get_info_as_list(df.event_content.to_string().split('\n')[0])
user = int(df.iloc[0]["sender_subject_id"])
machine_info = MachineInfo(
used=True,
probability=info[0],
user=user,
answer_given=info[1])
self.machine_usage_info[info[2]] = machine_info
else:
df = self.team_event_logs.iloc[
indices[begin_index] + 1: indices[end_index]]
df = df[df.extra_data == 'IndividualResponse']
df.event_content = df.event_content.apply(extract_question_number)
question_number = int(float(df.iloc[0]["event_content"]))
self.machine_usage_info[question_number] = MachineInfo()
begin_index = end_index
end_index += 1
# def _create_team_member_mapping(self, df) -> None:
# self.team_member_mapping = {}
# for index in range(len(df)):
# sender_subject_id = int(str(df.sender_subject_id.iloc[index]))
# session_id = str(df[df.columns[8]].iloc[index])
# if session_id not in self.team_member_mapping:
# self.team_member_mapping[session_id] = sender_subject_id
# def _add_to_team_member_mapping(self, sender) -> None:
# subject = pd.read_csv(
# self.logs_directory_path + 'subject.csv',
# sep=',',
# quotechar='|',
# names=['sender_subject_id', 'sender', 'sender_dup', 'group',
# 'empty'])
# row = subject.loc[subject['sender'] == sender]
# item = row.iloc[0][0]
# self.team_member_mapping[sender] = item
def _preload_data(self, directory) -> None:
# Preloading of the data
self.event_log = pd.read_csv(directory+"/event_log.csv", sep=',',quotechar="|", names=["id","event_type","event_content","timestamp","completed_task_id","sender_subject_id","receiver_subject_id","session_id","sender","receiver","extra_data"])
self.team_subjects = pd.read_csv(directory+"/team_has_subject.csv",sep=',',quotechar="|",names=["id","teamId","sender_subject_id"]).drop('id',1)
event_log_no_message = self.event_log[(self.event_log['event_type'] == "TASK_ATTRIBUTE")]
event_log_no_message["sender_subject_id"] = pd.to_numeric(event_log_no_message["sender_subject_id"])
event_log_with_team = pd.merge(event_log_no_message, self.team_subjects, on='sender_subject_id', how='left')
event_log_task_attribute = event_log_with_team[(event_log_with_team['event_type'] == "TASK_ATTRIBUTE") & (event_log_with_team['teamId'] == self.team_id)]
#Extract data from event_content column
new_event_content = pd.DataFrame(
index=np.arange(0, len(event_log_task_attribute)),
columns=("id","stringValue", "questionNumber","questionScore","attributeName"))
self.questionNumbers = list()
for i in range(len(event_log_task_attribute)):
new_event_content.id[i] = event_log_task_attribute.iloc[i]["id"]
new_event_content.stringValue[i] = event_log_task_attribute.iloc[i]["event_content"].split("||")[0].split(":")[1].replace('"', '')
new_event_content.questionNumber[i] = event_log_task_attribute.iloc[i]["event_content"].split("||")[1].split(":")[1]
if new_event_content.questionNumber[i] not in self.questionNumbers:
self.questionNumbers.append(new_event_content.questionNumber[i])
new_event_content.questionScore[i] = event_log_task_attribute.iloc[i]["event_content"].split("||")[3].split(":")[1]
new_event_content.attributeName[i] =event_log_task_attribute.iloc[i]["event_content"].split("||")[2].split(":")[1]
self.questionNumbers = self.questionNumbers[1 :]
self.event_log_with_all_data = pd.merge(event_log_task_attribute,new_event_content,on='id', how ='left')
def _define_team_member_order(self, directory) -> None:
# Define teammember order
subjects = pd.read_csv(directory+"/subject.csv", sep=',',quotechar="|", names=["sender_subject_id","externalId","displayName","sessionId","previousSessionSubject"])
team_with_subject_details = pd.merge(self.team_subjects, subjects, on='sender_subject_id', how='left')
self.team_member = team_with_subject_details[(team_with_subject_details['teamId'] == self.team_id)]['displayName']
self.team_size = len(self.team_member)
self.team_array = []
for i in range(self.team_size):
self.team_array.append(self.team_member.iloc[i])
self.members = []
for member in self.team_array:
self.members.append(
list(subjects[subjects['displayName'] == member]['sender_subject_id'])[0])
def _extract_and_fill_missing_values(self, temp, aR, mI, aR_from_data, mI_from_data):
for j in range(0, self.team_size):
agent_from_data = True
member_from_data = True
# Fill missing values
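            # A blank agent rating defaults to 0.0 and a blank member influence to 25
            # (an even split for a four-person team); the *_from_data flags record
            # whether each value came from the log or from these defaults.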
xy = re.findall(r'Ratings(.*?) Member', temp)[0].split("+")[j].split("=")[1]
if(xy==''):
xy = '0.0'
agent_from_data = False
yz= temp.replace('"', '')[temp.index("Influences ")+10:].split("+")[j].split("=")[1]
if(yz == ''):
yz = '25'
member_from_data = False
aR.append(float(xy))
mI.append(int(round(float(yz))))
aR_from_data.append(agent_from_data)
mI_from_data.append(member_from_data)
def _add_values_for_missing_line(self, count, missing_members, a_ratings,
m_influences, a_ratings_from_data, m_influences_from_data):
for member in missing_members:
aR = list()
mI = list()
aR_from_data = list()
mI_from_data = list()
idx = self.team_array.index(member)
for j in range(0, self.team_size):
aR.append(0.0)
mI.append(25)
aR_from_data.append(False)
mI_from_data.append(False)
a_ratings[idx] = aR
m_influences[idx] = mI
a_ratings_from_data[idx] = aR_from_data
m_influences_from_data[idx] = mI_from_data
count += 1
return count
def _load_ratings(self) -> None:
self.agent_ratings = list()
self.member_influences = list()
self.agent_ratings_from_data = list()
self.member_influences_from_data = list()
m_influences = [0 for i in range(self.team_size)]
a_ratings = [0 for i in range(self.team_size)]
m_influences_from_data = [False for i in range(self.team_size)]
a_ratings_from_data = [False for i in range(self.team_size)]
count = 0
influence_matrices = self.event_log_with_all_data[(self.event_log_with_all_data['extra_data'] == "InfluenceMatrix")]
influence_matrix_without_undefined = influence_matrices[~influence_matrices['stringValue'].str.contains("undefined")]
final_influences = influence_matrix_without_undefined.groupby(['questionScore', 'sender'], as_index=False, sort=False).last()
processed_members = []
current_question_score = None
# Loop that extracts values and fills in missing ones for all
# InfluenceMatrix entries
for i in range(len(final_influences)):
count +=1
aR = list()
mI = list()
aR_from_data = list()
mI_from_data = list()
idx = self.team_array.index(final_influences.iloc[i]['sender'])
processed_members.append(final_influences.iloc[i]['sender'])
current_question_score = int(final_influences.iloc[i]['questionScore'])
a_ratings[idx]=aR
m_influences[idx]=mI
a_ratings_from_data[idx] = aR_from_data
m_influences_from_data[idx] = mI_from_data
temp = final_influences.iloc[i]['stringValue']
self._extract_and_fill_missing_values(temp, aR, mI, aR_from_data, mI_from_data)
# Need to check if there is a next influence matrix line or
# if the next line belongs to a different round
# If so, check if we missed a team member's answers (missing line)
# and fill in values
if (i + 2 > len(final_influences) or
int(final_influences.iloc[i + 1]['questionScore']) != current_question_score):
missing_members = np.setdiff1d(self.team_array, processed_members)
count = self._add_values_for_missing_line(count, missing_members,
a_ratings, m_influences, a_ratings_from_data, m_influences_from_data)
processed_members = []
# If we saw everyone's answers, then add their responses to the
# influence matrix data structure along with whether the answers
# were from data or not.
if (count == self.team_size):
self.member_influences.append(m_influences)
self.agent_ratings.append(a_ratings)
self.member_influences_from_data.append(m_influences_from_data)
self.agent_ratings_from_data.append(a_ratings_from_data)
m_influences = [0 for i in range(self.team_size)]
a_ratings = [0 for i in range(self.team_size)]
m_influences_from_data = [False for i in range(self.team_size)]
a_ratings_from_data = [False for i in range(self.team_size)]
count = 0
def _load_accumulated_score(self) -> None:
"""Loads the accumulated score per question"""
self.score = {}
self.accumulated_score = {}
self.accumulated_score[0] = 0
index = 1
for i in self.question_order:
score_earned = 0
final_answer_chosen = None
if len(set(self.group_answers_chosen[i].values())) == 1:
final_answer_chosen = self.group_answers_chosen[i][list(self.group_answers_chosen[i].keys())[0]]
if i not in self.game_info.questions:
print(
'Warning: question {} was not found in the game info.'.format(i))
continue
answer = self.game_info.questions[i].answer
if (final_answer_chosen == answer):
score_earned = score_earned + 4
else:
score_earned = score_earned - 1
if self.machine_usage_info[i].used:
score_earned = score_earned - 1
self.score[i] = score_earned
self.accumulated_score[index] = self.accumulated_score[index - 1] + score_earned
index = index + 1
def _load_survey(self) -> None:
pre_experiment_data = self.event_log_with_all_data[self.event_log_with_all_data['extra_data'] == "RadioField"]
self.pre_experiment_rating = []
for i in range(len(self.team_array)):
survey_dict = { 0: -1, 1: -1, 2: -1}
for row in range (len(pre_experiment_data) - 1, -1, -1):
if (pre_experiment_data.iloc[row]['sender'] == self.team_member.iloc[i]):
current_frame = pre_experiment_data.iloc[row]
if (current_frame['attributeName'] == "\"surveyAnswer0\"" and
survey_dict[0] == -1):
survey_dict[0] = float(current_frame['stringValue'][0:1])
elif(current_frame['attributeName'] == "\"surveyAnswer1\"" and
survey_dict[1] == -1):
survey_dict[1] = float(current_frame['stringValue'][0:1])
elif(current_frame['attributeName'] == "\"surveyAnswer2\"" and
survey_dict[2] == -1):
survey_dict[2] = float(current_frame['stringValue'][0:1])
self.pre_experiment_rating.append(survey_dict)
def _old_load_all(self, directory, teamId):
#Constants
self.numQuestions = 45
self.trainingSetSize = 30
self.testSetSize = 15
self.numAgents = 4
self.numCentralityReports = 9
self.c = 4
self.e = -1
self.z = -1
# Other Parameters
self.influenceMatrixIndex = 0
self.machineUseCount = [-1, -1, -1, -1]
self.firstMachineUsage = [-1, -1, -1, -1]
# Preloading of the data
eventLog = pd.read_csv(directory+"/event_log.csv", sep=',',quotechar="|", names=["id","event_type","event_content","timestamp","completed_task_id","sender_subject_id","receiver_subject_id","session_id","sender","receiver","extra_data"])
teamSubjects = pd.read_csv(directory+"/team_has_subject.csv",sep=',',quotechar="|",names=["id","teamId","sender_subject_id"]).drop('id',1)
elNoMessage = eventLog[(eventLog['event_type'] == "TASK_ATTRIBUTE")]
elNoMessage["sender_subject_id"] = pd.to_numeric(elNoMessage["sender_subject_id"])
        eventLogWithTeam = pd.merge(elNoMessage, teamSubjects, on='sender_subject_id', how='left')
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from xgboost import XGBRegressor, plot_importance
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler, Normalizer
from sklearn.feature_selection import SelectKBest, f_regression
from model import ElasticNetModel, PCAModel, LassoModel, KernelPCAModel, RidgeModel, SimulatedAnnealing
from tools.train import xgb_parameters_search
# Load the data and drop meaningless variables
def load_data(dir_path, drop_list=['id', '时间']):
data = pd.read_excel(dir_path)
for col in drop_list:
data.drop(col, axis=1, inplace=True)
return data
def nan_data_rate(df, n, ascending_=False, origin=True):
"""
    【Function】Missing-rate statistics function nan_data_rate
    :param df: the dataframe to process
    :param n: the number of variables to display
    :param ascending_: whether to sort missing rates ascending or descending
    :param origin: whether to include variables that have no missing values
    :return: the missing rates of the top n variables
"""
    if n > len(df.columns):  # if more variables are requested than exist, show them all
        print('Requested more variables than the total of %i; showing all variables' % (len(df.columns)))
n = len(df.columns)
    na_rate = df.isnull().sum() / len(df) * 100  # missing rate (%) of each variable
    if origin:  # if True, also include variables that have no missing values
na_rate = na_rate.sort_values(ascending=ascending_)
missing_data = pd.DataFrame({'Missing_Ratio': na_rate})
    else:  # otherwise, show only variables that do have missing values
na_rate = na_rate.drop(na_rate[na_rate == 0].index).sort_values(ascending=ascending_)
missing_data = pd.DataFrame({'Missing_Ratio': na_rate})
return missing_data.head(n)
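# e.g. nan_data_rate(data, n=10, origin=False) lists the 10 columns with the highest
# share of missing values (the default ascending_=False keeps the most-missing on top).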
# Min-max normalization
def min_max_normalize(df):
"""
    Min-max normalize all variables in the dataframe
    Args:
        df: the original, unscaled dataframe
    return: the normalized dataframe
"""
#
for col in df.columns:
min_val = np.min(df[col])
max_val = np.max(df[col])
df[col] = (df[col] - min_val) / (max_val - min_val)
print("{} 变量已经被最小最大标准化".format(col))
return df
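# Note: a constant column (max == min) makes the denominator zero and yields NaN;
# the input is assumed to have no constant columns.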
# Data preprocessing
def pre_process(df):
df['RON_LOSS_RATE'] = df['RON_LOSS'] / (df['材料辛烷值RON'] + 1e-8)
return df
# Variance-based feature screening
def variance_select(df, k):
"""
    Select the k variables with the largest variance
    Args:
        df: the dataframe to analyze
        k: the number of variables to select
    return: column names of the selected variables
"""
var = df.apply(lambda x: x.var())
var = var.sort_values()
return var.tail(k).index
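# e.g. variance_select(df, 20) returns a pandas Index holding the names of the 20
# highest-variance columns.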
# Feature extraction
def feature_select(df):
base = ['材料辛烷值RON', 'D121去稳定塔流量', '还原器温度', 'E-101D壳程出口管温度', 'D-204液位', 'D123冷凝水罐液位',
'D-123压力', 'D-121水液位', 'D-102温度', '原料汽油硫含量',
'TAG表和PID图未见PDI-2107点,是否为DI-2107', '稳定塔顶回流流量', '热循环气去R101底提升气管流量',
'空气预热器空气出口温度', '低压热氮气压力', 'R-101下部床层压降', 'R-101床层中部温度', 'R-101床层下部温度',
'P-101B入口过滤器差压', 'ME-109过滤器差压', 'ME-105过滤器压差', 'F-101辐射室出口压力',
'S_ZORB AT-0004', 'S_ZORB AT-0011', 'D-201含硫污水液位', 'D101原料缓冲罐压力']
# base = ['还原器温度', '预热器出口空气温度', '0.3MPa凝结水出装置流量',
# '对流室出口温度', 'E-101ABC壳程出口温度', 'E-101壳程出口总管温度.1', 'E-101DEF壳程出口温度',
# 'E-101ABC管程出口温度', 'E-101DEF管程出口温度', '塔顶回流罐D201液位', '1.1步骤PIC2401B.OP',
# '1#催化汽油进装置流量', 'R-101床层中部温度.1', 'A-202A/B出口总管温度', 'D-102温度',
# 'E-101A壳程出口管温度', 'D-125液位', '反应器质量空速', '8.0MPa氢气至反吹氢压缩机出口.1', '反应器藏量',
# '由PDI2104计算出', '反应器顶底压差', '8.0MPa氢气至循环氢压缩机入口.1',
# 'EH-102加热元件/A束温度', 'K-102A进气压力', 'EH-102出口空气总管温度', 'S-ZORB.FT_1002.TOTAL',
# ]
return df[base]
def extract_x(df, columns):
x = df.copy()
for col in columns:
x.drop(col, axis=1, inplace=True)
return x
def main_pca():
df = pd.read_excel('/Users/ashzerm/item/GasOline/data/stand_oline.xlsx')
df = min_max_normalize(df)
y = df['RON_LOSS'].copy()
x = extract_x(df, columns=['RON_LOSS', '饱和烃', '烯烃', '芳烃', '溴值', '密度', '产品硫含量', '产品辛烷值RON'])
x = np.array(x)
# pca = PCAModel(20)
pca = KernelPCAModel(20, 'poly')
x_pca = pca.train(x)
# print(x_pca)
xgb_parameters_search(x_pca, y)
els = ElasticNetModel()
els.grid_search_alpha_rho(x, y)
els.test_elastic_net_alpha_rho(x, y)
def main_elastic_net():
# df = pd.read_excel('/Users/ashzerm/item/GasOline/data/oline_xy.xlsx')
df = pd.read_excel('/Users/ashzerm/item/GasOline/data/stand_oline.xlsx')
df = min_max_normalize(df)
y = df['RON_LOSS'].copy()
# x = extract_x(df, columns=['RON_LOSS_RATE'])
x = feature_select(df)
els = ElasticNetModel()
els.train(x, y)
els.test(x, y)
# els.grid_search_alpha_rho(x, y)
els.test_elastic_net_alpha_rho(x, y)
las = LassoModel(alpha=0.01)
las.test_lasso_alpha(x, y)
las.train(x, y, 5)
# xgb_parameters_search(x, y)
# xgb_r = XGBRegressor(max_depth=5, n_estimators=200, learning_rate=0.01)
# xgb_r.fit(x, y)
# plot_importance(xgb_r)
# plt.show()
# regr = RidgeModel()
# regr.train(x, y)
# Custom helper for drawing box plots
def create_box(data, x_name='', y_name='', title='', order_=[], scatter_=True):
sns.boxplot(x=x_name, y=y_name, data=data,
                linewidth=2,  # line width
                width=0.8,  # spacing ratio between boxes
                fliersize=3,  # outlier marker size
                palette='hls',  # color palette
                whis=1.5,  # IQR whisker multiplier
                notch=False,  # whether to notch the box at the median
                order=order_)  # restrict and order the plotted categories
if scatter_:
sns.swarmplot(x=x_name, y=y_name, data=data, color='k', size=3, alpha=0.6)
plt.title(title)
plt.show()
return
def main_q3():
df = pd.read_excel('/Users/ashzerm/item/GasOline/data/stand_oline.xlsx')
# df = min_max_normalize(df)
y = df['RON_LOSS'].copy()
# x = extract_x(df, columns=['RON_LOSS_RATE'])
x = feature_select(df)
bins = [-0.1, 0.6, 1.0, 1.5, 2, 3]
x['RON_LOSS_LEVEL'] = pd.cut(df['RON_LOSS'], bins=bins, labels=['0.6以下', '0.6-1.0', '1.0-1.5', '1.5-2', '2-3'])
for col in df.columns:
try:
            create_box(x, 'RON_LOSS_LEVEL', col, "{} vs. RON loss box plot".format(col),
['0.6以下', '0.6-1.0', '1.0-1.5', '1.5-2', '2-3'])
except:
pass
def main_q5():
    df = pd.read_excel('/Users/ashzerm/item/GasOline/data/stand_oline.xlsx')
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import regex
import scipy.stats as stats
import scipy.optimize as optimize
import scipy.signal as signal
from scipy import cluster
from Bio import SeqIO
sns.set_style('white')
plt.rcParams['xtick.labelsize']=15
plt.rcParams['ytick.labelsize']=15
#functions for reading a fasta and calculating the AT content
def loading_fasta_gbk(file_name,typeoffile):
"""reads either fasta or gbk files, file type needs to be given as 'fasta' or 'genbank' """
loaded=SeqIO.read(file_name, typeoffile)
return loaded
class Genome:
def __init__(self, genome_list,genome_annotation, start, end, strand, length):
self.name=genome_list #list with every gene name such as CCNA_00001
self.annotation=genome_annotation # gene annotation if there is one, if none stores NA
self.start=start # stores translational start position for each gene
self.end=end #stores end position of each gene
self.strand=strand # + or - strand (+1 or -1)
self.length=length # length of gene
def reading_gbk_new(gbk_file, features_to_extract):
"""function that will load from the gbk file: the start, end, strand and length of gene as well as the name and annotated name/function.
Returns one array and 2 lists """
genome_gene=[]
genome_gene_name=[]
genome_start=[]
genome_end=[]
genome_strand=[]
genome_length=[]
for i in range(0,len(gbk_file.features)):
isfeature=False
for j in range(len(features_to_extract)):
if gbk_file.features[i].type == features_to_extract[j]:
isfeature=True
if isfeature==True:
genome_gene.append(gbk_file.features[i].qualifiers['locus_tag'][0])
if 'product' in gbk_file.features[i].qualifiers:
genome_gene_name.append(gbk_file.features[i].qualifiers['product'][0])
else:
genome_gene_name.append('NA')
if gbk_file.features[i].location.strand < 0 :
genome_start.append(gbk_file.features[i].location.end)
genome_end.append(gbk_file.features[i].location.start)
genome_strand.append(-1)
genome_length.append(abs(gbk_file.features[i].location.end-gbk_file.features[i].location.start)+1)
else:
genome_start.append(gbk_file.features[i].location.start)
genome_end.append(gbk_file.features[i].location.end)
genome_strand.append(1)
genome_length.append(abs(gbk_file.features[i].location.end-gbk_file.features[i].location.start)+1)
genome = Genome(genome_gene,genome_gene_name,genome_start,genome_end,genome_strand,genome_length)
return genome
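# Hedged usage sketch (the file name below is an assumption, not part of this script):
#   gbk = loading_fasta_gbk('NC_011916.gbk', 'genbank')
#   genome = reading_gbk_new(gbk, ['CDS'])
#   print(genome.name[0], genome.start[0], genome.strand[0])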
def readGenome(a, skip = 0):
genomeFile = open(a, 'r')
out = ''
if skip != 0:
for i in range(0,skip,1):
genomeFile.readline()
line = genomeFile.readline()
while line != '':
out = out + line[:-1]
line = genomeFile.readline()
return out
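# readGenome concatenates the sequence lines of a plain FASTA-like file into one
# string; pass skip=1 to drop a single header line.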
def readCDSMG1655(annoteFile, skip = 0):
a = open(annoteFile, 'r')
gtype, start, end, strand, funct, bNum, gene = [], [], [], [], [], [], []
for i in range(0,skip):
a.readline()
line = a.readline()
while line != '':
if regex.findall('CDS', line):
z = line.split('\t')
b = z[8].split('ID=')
c = b[1].split(':')[0]
gtype.append(z[2])
start.append(z[3])
end.append(z[4])
strand.append(z[6])
if regex.findall('product', line):
zz = line.split('product=')[1]
funct.append(zz.split(';')[0])
else:
funct.append('n/a')
y = line.split('locus_tag=')[1]
bNum.append(y.split(';')[0])
gene.append(c.split('\"')[0])
line = a.readline()
out = np.array([gtype, start, end, strand, funct, bNum, gene])
out = | pd.DataFrame(out) | pandas.DataFrame |
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190102T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_weekahead(session, site_metadata, mocker):
variable = 'net_load'
observation = default_observation(
site_metadata, variable=variable,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
run_time = pd.Timestamp('20190110T1945Z')
forecast = default_forecast(
site_metadata, variable=variable,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1d'),
interval_label='beginning')
issue_time = pd.Timestamp('20190111T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, observation, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_interval_index(session, site_metadata,
obs_5min_begin):
# index=True not supported for day ahead
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert 'index=True not supported' in str(excinfo.value)
def test_run_persistence_interval_too_long(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('48h'), # too long
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_interval_not_midnight_to_midnight(session,
site_metadata,
obs_5min_begin):
# not midnight to midnight
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=22),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2200Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_incompatible_issue(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2330Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'incompatible' in str(excinfo.value).lower()
def test_run_persistence_fx_too_short(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1min'),
run_length=pd.Timedelta('3min'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'requires observation.interval_length' in str(excinfo.value)
def test_run_persistence_incompatible_instant_fx(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'instantaneous forecast' in str(excinfo.value).lower()
def test_run_persistence_incompatible_instant_interval(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
obs = obs_5min_begin.replace(interval_label='instantaneous',
interval_length=pd.Timedelta('10min'))
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs, forecast, run_time,
issue_time)
assert 'identical interval length' in str(excinfo.value)
def test_verify_nwp_forecasts_compatible(ac_power_forecast_metadata):
fx0 = ac_power_forecast_metadata
fx1 = replace(fx0, run_length=pd.Timedelta('10h'), interval_label='ending')
df = pd.DataFrame({'forecast': [fx0, fx1], 'model': ['a', 'b']})
errs = main._verify_nwp_forecasts_compatible(df)
assert set(errs) == {'model', 'run_length', 'interval_label'}
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', True),
('{"is_reference_persistence_forecast": true}', False),
('{"is_reference_forecast": "True"}', True),
('{"is_reference_forecast":"True"}', True),
('is_reference_forecast" : "True"}', True),
('{"is_reference_forecast" : true, "otherkey": badjson, 9}', True),
('reference_forecast": true', False),
('{"is_reference_forecast": false}', False),
("is_reference_forecast", False)
])
def test_is_reference_forecast(string, expected):
assert main._is_reference_forecast(string) == expected
def test_find_reference_nwp_forecasts_json_err(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
extra_params = '{"model": "themodel", "is_reference_forecast": true}'
fxs = [replace(ac_power_forecast_metadata, extra_parameters=extra_params),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "yes"}'),
replace(ac_power_forecast_metadata, extra_parameters='{"is_reference_forecast": true'), # NOQA
replace(ac_power_forecast_metadata, extra_parameters='')]
out = main.find_reference_nwp_forecasts(fxs)
assert logger.warning.called
assert len(out) == 1
def test_find_reference_nwp_forecasts_no_model(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
fxs = [replace(ac_power_forecast_metadata, extra_parameters='{}',
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 0
assert logger.debug.called
assert logger.error.called
def test_find_reference_nwp_forecasts_no_init(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 2
assert out.next_issue_time.unique() == [None]
assert out.piggyback_on.unique() == ['0']
def test_find_reference_nwp_forecasts(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(
fxs, pd.Timestamp('20190501T0000Z'))
assert len(out) == 2
assert out.next_issue_time.unique()[0] == pd.Timestamp('20190501T0500Z')
assert out.piggyback_on.unique() == ['0']
@pytest.fixture()
def forecast_list(ac_power_forecast_metadata):
model = 'nam_12km_cloud_cover_to_hourly_mean'
prob_dict = ac_power_forecast_metadata.to_dict()
prob_dict['constant_values'] = (0, 50, 100)
prob_dict['axis'] = 'y'
prob_dict['extra_parameters'] = '{"model": "gefs_half_deg_to_hourly_mean", "is_reference_forecast": true}' # NOQA
return [replace(ac_power_forecast_metadata,
extra_parameters=(
'{"model": "%s", "is_reference_forecast": true}'
% model),
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "gfs_quarter_deg_hourly_to_hourly_mean", "is_reference_forecast": true}', # NOQA
forecast_id='1'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='2',
variable='ghi'),
datamodel.ProbabilisticForecast.from_dict(prob_dict),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='3',
variable='dni',
provider='Organization 2'
),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "badmodel", "is_reference_forecast": true}', # NOQA
forecast_id='4'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "6", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='5',
variable='ghi'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": false}' % model, # NOQA
forecast_id='7',
variable='ghi'),
]
def test_process_nwp_forecast_groups(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 4
@pytest.mark.parametrize('run_time', [None, pd.Timestamp('20190501T0000Z')])
def test_process_nwp_forecast_groups_issue_time(mocker, forecast_list,
run_time):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert post_vals.call_count == 4
run_nwp.assert_called_with(mocker.ANY, mocker.ANY, mocker.ANY,
pd.Timestamp('20190501T0500Z'))
def test_process_nwp_forecast_groups_missing_var(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-3])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert logger.warning.called
assert post_vals.call_count == 4
def test_process_nwp_forecast_groups_bad_model(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[4:-1])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 0
def test_process_nwp_forecast_groups_missing_runfor(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[-2:])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert api.post_forecast_values.call_count == 0
@pytest.mark.parametrize('ind', [0, 1, 2])
def test__post_forecast_values_regular(mocker, forecast_list, ind):
api = mocker.MagicMock()
fx = forecast_list[ind]
main._post_forecast_values(api, fx, [0], 'whatever')
assert api.post_forecast_values.call_count == 1
def test__post_forecast_values_cdf(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
main._post_forecast_values(api, fx, vals, 'gefs')
assert api.post_probabilistic_forecast_constant_value_values.call_count == 3 # NOQA
def test__post_forecast_values_cdf_not_gefs(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(ValueError):
main._post_forecast_values(api, fx, vals, 'gfs')
def test__post_forecast_values_cdf_less_cols(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(10)})
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, vals, 'gefs')
def test__post_forecast_values_cdf_not_df(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, ser, 'gefs')
def test__post_forecast_values_cdf_no_cv_match(mocker, forecast_list):
api = mocker.MagicMock()
fx = replace(forecast_list[3], constant_values=(
replace(forecast_list[3].constant_values[0], constant_value=3.0
),))
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(KeyError):
main._post_forecast_values(api, fx, vals, 'gefs')
@pytest.mark.parametrize('issue_buffer,empty', [
(pd.Timedelta('10h'), False),
(pd.Timedelta('1h'), True),
(pd.Timedelta('5h'), False)
])
def test_make_latest_nwp_forecasts(forecast_list, mocker, issue_buffer, empty):
session = mocker.patch('solarforecastarbiter.io.api.APISession')
session.return_value.get_user_info.return_value = {'organization': ''}
session.return_value.list_forecasts.return_value = forecast_list[:-3]
session.return_value.list_probabilistic_forecasts.return_value = []
run_time = pd.Timestamp('20190501T0000Z')
# last fx has different org
fxdf = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
process = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.process_nwp_forecast_groups') # NOQA
main.make_latest_nwp_forecasts('', run_time, issue_buffer)
if empty:
process.assert_not_called()
else:
assert_frame_equal(process.call_args[0][-1], fxdf)
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', False),
('{"is_reference_persistence_forecast": true}', True),
('{"is_reference_persistence_forecast": "True"}', True),
('{"is_reference_persistence_forecast":"True"}', True),
('is_reference_persistence_forecast" : "True"}', True),
('{"is_reference_persistence_forecast" : true, "otherkey": badjson, 9}',
True),
('reference_persistence_forecast": true', False),
('{"is_reference_persistence_forecast": false}', False),
("is_reference_persistence_forecast", False)
])
def test_is_reference_persistence_forecast(string, expected):
assert main._is_reference_persistence_forecast(string) == expected
@pytest.fixture
def perst_fx_obs(mocker, ac_power_observation_metadata,
ac_power_forecast_metadata):
observations = [
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
)
]
def make_extra(obs):
extra = (
'{"is_reference_persistence_forecast": true,'
f'"observation_id": "{obs.observation_id}"'
'}'
)
return extra
forecasts = [
ac_power_forecast_metadata.replace(
name='FX0',
extra_parameters=make_extra(observations[0]),
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX no persist',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX bad js',
extra_parameters='is_reference_persistence_forecast": true other',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
)
]
return forecasts, observations
def test_generate_reference_persistence_forecast_parameters(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
# one hour ahead forecast, so 14Z was made at 13Z
# enough data to do 14Z and 15Z issue times but not 16Z
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 2
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T14:00Z'),
False
)
assert param_list[1] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
)
def test_generate_reference_persistence_forecast_parameters_no_forecast_yet(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.NaT, pd.NaT)
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 1
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
)
def test_generate_reference_persistence_forecast_parameters_no_data(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.NaT, pd.NaT)
session.get_forecast_time_range.return_value = (
pd.NaT, pd.NaT)
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_diff_org(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': 'a new one'}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_not_reference_fx(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts = [fx.replace(extra_parameters='') for fx in forecasts]
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_no_obs_id(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts[0] = forecasts[0].replace(
extra_parameters='{"is_reference_persistence_forecast": true}')
forecasts[1] = forecasts[1].replace(
extra_parameters='{"is_reference_persistence_forecast": true, "observation_id": "idnotinobs"}') # NOQA
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), | pd.Timestamp('2020-05-20T14:00Z') | pandas.Timestamp |
import pandas as pd
import numpy as np
from collections import Counter
class my_NB:
def __init__(self, alpha=1):
# alpha: smoothing factor
# P(xi = t | y = c) = (N(t,c) + alpha) / (N(c) + n(i)*alpha)
# where n(i) is the number of available categories (values) of feature i
# Setting alpha = 1 is called Laplace smoothing
self.alpha = alpha
    def fit(self, X, y):
        # X: pd.DataFrame, independent variables, str
        # y: list, np.array or pd.Series, dependent variables, int or str
        # list of classes for this model
        self.classes_ = list(set(list(y)))
        # for calculation of P(y)
        self.P_y = Counter(y)
        # self.P[yj][Xi][xi] = P(xi|yj) where Xi is the feature name and xi is the feature value, yj is a specific class label
        # self.alpha from __init__() is the smoothing factor: P(xi|yj) = (N(xi,yj) + alpha) / (N(yj) + n(i)*alpha)
        self.P = {}
        y = pd.Series(list(y)).reset_index(drop=True)
        X = X.reset_index(drop=True)
        for label in self.classes_:
            X_c = X[(y == label).values]
            self.P[label] = {}
            for key in X.columns:
                counts = Counter(X_c[key])
                n_i = X[key].nunique()
                self.P[label][key] = {v: (counts[v] + self.alpha) / (len(X_c) + n_i * self.alpha)
                                      for v in X[key].unique()}
        return
def predict_proba(self, X):
# X: pd.DataFrame, independent variables, str
# prob is a dict of prediction probabilities belonging to each categories
# return probs = pd.DataFrame(list of prob, columns = self.classes_)
# write your code below
probs = {}
for label in self.classes_:
p = self.P_y[label]
for key in X:
p *= X[key].apply(lambda value: self.P[label][key][value] if value in self.P[label][key] else 1)
probs[label] = p
probs = | pd.DataFrame(probs, columns=self.classes_) | pandas.DataFrame |
# dkscraper.py
import datetime
import logging
from pathlib import Path
import numpy as np
import pandas as pd
DATA_DIR = Path(__file__).parent / 'data'
OFFENSE_PLAY_TYPES = ('pass', 'run', 'qb_spike', 'qb_kneel')
OFFENSE_IMPORTANT_PLAYS = ('pass', 'run')
def convert_top(t):
"""Converts time of possession string to seconds
Args:
t (str): e.g. '1:30'
Returns:
int: e.g. 90
"""
try:
m, s = [int(c) for c in t.split(':')]
return m * 60 + s
except (AttributeError, ValueError):
return 0
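# Quick check (illustration only): a well-formed clock string converts to seconds, and
# malformed input falls back to 0 via the except branch above.
def _example_convert_top():
    return convert_top('1:30'), convert_top(None)  # (90, 0)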
def current_season():
"""Gets current season year
Returns:
int: e.g. 2020
"""
td = datetime.datetime.today()
if td.month > 8:
return td.year
return td.year - 1
def current_season_week(sched=None):
"""Gets current week of current season
Args:
sched (DataFrame): default None
Returns:
int: 1 - 17
"""
if sched is None or sched.empty:
sched = schedule()
td = datetime.datetime.today()
seas = current_season()
week_starts = sched.loc[sched.season == seas, :].groupby('week')['gameday'].min()
this_week = week_starts.loc[week_starts < td].max()
return week_starts.loc[week_starts == this_week].index.values[0]
def dst(df):
"""Gets core dst stats from play-by-play
Args:
df (DataFrame): play-by-play dataframe
Returns:
DataFrame with columns
"""
pass
def gamesmeta(sched=None):
"""Converts schedule to one row per team, two per game"""
if sched is None or sched.empty:
sched = schedule()
h = sched.copy()
a = sched.copy()
h = h.rename(columns={'home_team': 'team', 'away_team': 'opp'})
a = a.rename(columns={'away_team': 'team', 'home_team': 'opp'})
return pd.concat([a, h]).sort_values('game_id')
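# Usage sketch (assumption: data/schedule.parquet exists): gamesmeta() re-keys the schedule
# so each game appears twice, once per team, which makes team-level joins easier.
def _example_gamesmeta():
    gm = gamesmeta()
    # every game_id should now appear exactly twice
    return gm.groupby('game_id').size().eq(2).all()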
def passing(df):
"""Gets core passing stats from play-by-play
Args:
df (DataFrame): play-by-play dataframe
Returns:
DataFrame with columns
"""
pass
def plays(df):
"""Gets core pace/plays from play-by-play
Args:
df (DataFrame): play-by-play dataframe
Returns:
        DataFrame with pass/run/total play counts, run and pass rates, and time of
        possession, one row per (game_id, posteam)
"""
tp = (
df.query('play_type in @OFFENSE_PLAY_TYPES')
.pivot_table(index=['game_id', 'posteam'],
columns=['play_type'],
values=['play_id'],
aggfunc='count',
fill_value=0)
.pipe(lambda x: x.set_axis([f'{b}_plays' for a, b in x.columns], axis=1, inplace=False))
.reset_index()
)
tp['tot_plays'] = tp.loc[:, [c for c in tp.columns if '_plays' in c]].sum(axis=1)
tp['run_pct'] = tp['run_plays'] / (tp['run_plays'] + tp['pass_plays'])
tp['pass_pct'] = tp['pass_plays'] / (tp['run_plays'] + tp['pass_plays'])
return tp.join(time_of_possession(df), on=['game_id', 'posteam'], how='left')
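# Usage sketch (assumption): 'pbp.parquet' is a placeholder for an nflfastR-style
# play-by-play file with game_id, posteam, play_type and play_id columns, which is what
# plays() pivots on; time_of_possession() is assumed to be defined elsewhere in this module.
def _example_plays(pbp_path=DATA_DIR / 'pbp.parquet'):
    pbp = pd.read_parquet(pbp_path)
    # one row per (game_id, posteam) with pass/run counts, rates and time of possession
    return plays(pbp).head()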
def receiving(df):
"""Gets core receiving stats from play-by-play
Args:
df (DataFrame): play-by-play dataframe
Returns:
DataFrame with columns
targets, receptions, rec_yards, rec_tds,
total_air_yards, inc_air_yards, yac
"""
cols = ['player', 'targets', 'receptions', 'rec_yards', 'rec_tds',
'total_air_yards', 'inc_air_yards', 'yac']
return (
df
.query('play_type == "pass"')
.groupby(['game_id', 'receiver_player_id'])
.agg(targets=('play_type', 'count'),
receptions=('complete_pass', 'sum'),
rec_yards=('yards_gained', 'sum'),
rec_tds=('pass_touchdown', 'sum'),
total_air_yards=('air_yards', 'sum'),
yac=('yards_after_catch', 'sum'))
.assign(inc_air_yards=lambda x: x['total_air_yards'] + x['yac'] - x['rec_yards'])
.reset_index(level=0, drop=True)
.join(df.groupby('receiver_player_id').first()['receiver_player_name'], how='left')
.reset_index(drop=True)
.rename(columns={'receiver_player_name': 'player'})
.loc[:, cols]
.set_index('player')
.fillna(0)
.astype(int)
)
def rushing(df, add_success=False):
"""Gets core rushing stats from play-by-play
Args:
df (DataFrame): play-by-play dataframe
add_success (bool): add success data, default False
Returns:
DataFrame with columns
'rush_att', 'rush_yards', 'rush_tds'
"""
tmp = (
df
.query('play_type == "run"')
.rename(columns={'rusher_player_id': 'player_id', 'rusher_player_name': 'player'})
.groupby(['game_id', 'player_id', 'player'])
.agg(rush_att=('rush_attempt', 'sum'),
rush_yards=('yards_gained', 'sum'),
rush_tds=('rush_touchdown', 'sum'))
.droplevel(0)
.fillna(0)
.astype(int)
)
if add_success:
s = rushing_success_rate(df)
tmp = tmp.join(s.drop('rushes', axis=1), how='left')
return tmp
def rushing_success(row):
"""Determines whether rushing play was success
Based on Chase Stuart / Football perspectives
Returns:
int: 1 if success, 0 otherwise
"""
success = 0
if row.down == 1:
if row.yards_gained >= 6:
success = 1
elif row.yards_gained >= row.ydstogo * .4:
success = 1
elif row.down == 2:
if row.yards_gained >= 6:
success = 1
elif row.yards_gained >= row.ydstogo * .5:
success = 1
    elif row.down >= 3:  # 3rd or 4th down: success requires converting the full distance
if row.yards_gained >= row.ydstogo:
success = 1
return success
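# Worked example (illustration only): on 1st-and-10 a 4-yard gain is a success because
# 4 >= 0.4 * 10, while on 3rd-and-8 the same gain fails because the full distance is required.
def _example_rushing_success():
    first_and_ten = pd.Series({'down': 1, 'ydstogo': 10, 'yards_gained': 4})
    third_and_eight = pd.Series({'down': 3, 'ydstogo': 8, 'yards_gained': 4})
    return rushing_success(first_and_ten), rushing_success(third_and_eight)  # (1, 0)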
def rushing_success_rate(df):
"""Calculates rushing success rate"""
df['success'] = df.apply(rushing_success, axis=1)
criteria = (df.down > 2) & (df.ydstogo > 5)
return (
df
.loc[~criteria, :]
.rename(columns={'rusher_player_id': 'player_id', 'rusher_player_name': 'player'})
.groupby(['game_id', 'player_id', 'player'])
.agg(successes=('success', 'sum'),
rushes=('rush_attempt', 'sum'))
.assign(success_rate=lambda df_: df_.successes / df_.rushes)
.droplevel(0)
)
def schedule(fn=None):
"""Gets schedule"""
if not fn:
fn = DATA_DIR / 'schedule.parquet'
return pd.read_parquet(fn)
def situation(df):
"""Gets situational rates"""
tmp1 = plays(df.loc[df.score_differential.abs() <= 6, :])
tmp1['situation_type'] = 'Neutral'
tmp2 = plays(df.loc[df.score_differential > 6, :])
tmp2['situation_type'] = 'Ahead'
tmp3 = plays(df.loc[df.score_differential < -6, :])
tmp3['situation_type'] = 'Behind'
return (
| pd.concat([tmp1, tmp2, tmp3], axis=0, ignore_index=True) | pandas.concat |
from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.base import DataError
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions works for different dtypes
#
# NOTE that these are yielded tests and so _create_data
# is explicitly called.
#
# further note that we are only checking rolling for fully dtype
# compliance (though both expanding and ewm inherit)
class Dtype:
window = 2
funcs = {
"count": lambda v: v.count(),
"max": lambda v: v.max(),
"min": lambda v: v.min(),
"sum": lambda v: v.sum(),
"mean": lambda v: v.mean(),
"std": lambda v: v.std(),
"var": lambda v: v.var(),
"median": lambda v: v.median(),
}
def get_expects(self):
expects = {
"sr1": {
"count": Series([1, 2, 2, 2, 2], dtype="float64"),
"max": Series([np.nan, 1, 2, 3, 4], dtype="float64"),
"min": Series([np.nan, 0, 1, 2, 3], dtype="float64"),
"sum": Series([np.nan, 1, 3, 5, 7], dtype="float64"),
"mean": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"),
"std": Series([np.nan] + [np.sqrt(0.5)] * 4, dtype="float64"),
"var": Series([np.nan, 0.5, 0.5, 0.5, 0.5], dtype="float64"),
"median": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"),
},
"sr2": {
"count": Series([1, 2, 2, 2, 2], dtype="float64"),
"max": Series([np.nan, 10, 8, 6, 4], dtype="float64"),
"min": Series([np.nan, 8, 6, 4, 2], dtype="float64"),
"sum": Series([np.nan, 18, 14, 10, 6], dtype="float64"),
"mean": Series([np.nan, 9, 7, 5, 3], dtype="float64"),
"std": Series([np.nan] + [np.sqrt(2)] * 4, dtype="float64"),
"var": Series([np.nan, 2, 2, 2, 2], dtype="float64"),
"median": Series([np.nan, 9, 7, 5, 3], dtype="float64"),
},
"sr3": {
"count": Series([1, 2, 2, 1, 1], dtype="float64"),
"max": Series([np.nan, 1, 2, np.nan, np.nan], dtype="float64"),
"min": Series([np.nan, 0, 1, np.nan, np.nan], dtype="float64"),
"sum": Series([np.nan, 1, 3, np.nan, np.nan], dtype="float64"),
"mean": Series([np.nan, 0.5, 1.5, np.nan, np.nan], dtype="float64"),
"std": Series(
[np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2, dtype="float64"
),
"var": Series([np.nan, 0.5, 0.5, np.nan, np.nan], dtype="float64"),
"median": Series([np.nan, 0.5, 1.5, np.nan, np.nan], dtype="float64"),
},
"df": {
"count": DataFrame(
{0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])},
dtype="float64",
),
"max": DataFrame(
{0: Series([np.nan, 2, 4, 6, 8]), 1: | Series([np.nan, 3, 5, 7, 9]) | pandas.Series |
import os
import gc
import time
import imghdr
from io import BytesIO
from typing import List, Optional
from datetime import datetime
import requests
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm # if you don't use IPython Kernel like jupyter, you should change "tqdm.notebook" to "tqdm"
from cairosvg import svg2png
from PIL import Image
import cv2
def get_opensea_api_key():
return os.getenv('OPENSEA_API_KEY')
def get_comet_ml_key():
return os.getenv('COMET_ML_KEY')
def is_image(url) -> bool:
"""
Determine if it is an image of png or jpeg.
Parameters
----------
url : str
Target url.
Returns
-------
True or False: Return True if this url content is an image of png or jpeg else returns False.
"""
img = requests.get(url).content
img_type = imghdr.what(None, h=img)
if img_type in ['png', 'jpeg']:
return True
else:
return False
def is_svg(url) -> bool:
"""
Determine if it is an image of svg.
Parameters
----------
url : str
Target url.
Returns
-------
True or False: Return True if this url content is an image of svg else returns False.
"""
if url.endswith(".svg"):
return True
else:
return False
def save_png(url, file_name) -> None:
"""
Save an image of png or jpeg as a png file.
Parameters
----------
url : str
Target url.
file_name : str
The file path of a saved png file.
Returns
-------
None
"""
img = requests.get(url).content
img = Image.open(BytesIO(img)).convert("RGBA")
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGBA2BGRA)
cv2.imwrite(file_name, img, [int(cv2.IMWRITE_PNG_COMPRESSION), 3])
def save_svg(url, file_name) -> None:
"""
    Convert an svg image to png and save it as a png file. Animated svg content cannot be saved.
Parameters
----------
url : str
Target url.
file_name : str
The file path of a saved png file.
Returns
-------
None
"""
img = requests.get(url).content
img = svg2png(bytestring=img)
img = Image.open(BytesIO(img)).convert("RGBA")
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGBA2BGRA)
cv2.imwrite(file_name, img)
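# Minimal sketch (assumption, not part of the original module) of how the helpers above fit
# together: choose save_png or save_svg based on the URL content type. The URL below is a
# placeholder, not a real asset.
def _example_save_any(url='https://example.com/token.png', file_name='0.png'):
    if is_image(url):
        save_png(url, file_name)
        return True
    if is_svg(url):
        save_svg(url, file_name)
        return True
    return False  # unsupported content (e.g. mp4 or animated svg) is skipped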
def get_random_data(dir_name: str, num_loop: Optional[int] = None,
start_id: int = 0, is_test: bool = False) -> pd.DataFrame:
"""
    Get NFT data registered on OpenSea by using the OpenSea API.
    You can fetch a large amount of data in random order. If you want to change the data to
    be acquired, see the OpenSea API reference and adjust 'params' and 'get_features'.
    A delay of one request per minute is used to reduce the server load;
    change it according to your preference (within the bounds of common sense).
    If the fetched content is anything other than png, jpeg or still-image svg,
    that item is not saved (but the process continues).
    Parameters
    ----------
    dir_name : str
        Directory path to save images.
    num_loop : int
        Number of request loops. The number of fetched records is 'num_loop' * 50.
    start_id : int
        Id assigned to the first saved image; used to continue numbering across runs.
    is_test : bool
        Fetch a small amount of data (num_loop=5) regardless of the value of num_loop
        if you set "is_test=True".
    Returns
    -------
    df : pd.DataFrame
        DataFrame of NFT metadata for the saved images, including their image ids.
    See Also
    --------
    get_features : list of str
        The original data has hundreds of columns, but only a limited number are kept.
        Change this according to your preference if column names you need are not included.
    params : dict of requests parameters
        Like get_features, change this according to your preference if you want to change
        the data being fetched.
"""
DATAPATH = dir_name
df = pd.DataFrame()
img_id = start_id
url = "https://api.opensea.io/api/v1/assets"
if is_test or num_loop is None:
num_loop = 5
print("This function execute on test mode(automatically changes to num_loop=5).")
for idx in tqdm(range(num_loop)):
try:
params = {"limit": "50",
"order_by": "sale_date",
"order_direction": "desc",
"offset": str(50*idx)}
response = requests.get(url, params=params)
data = response.json()
assets_df = pd.json_normalize(data['assets'])
for feature in assets_df.columns.values:
if feature not in df.columns.values:
df[feature] = None
for feature in df.columns.values:
if feature not in assets_df.columns.values:
assets_df[feature] = None
for i in range(assets_df.shape[0]):
img_url = assets_df.iloc[i]['image_url']
                img_url = img_url.replace(" ", "")
if is_image(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_png(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
elif is_svg(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_svg(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
else:
continue
gc.collect() # Just in case, free the memory so that the process does not stop
time.sleep(60)
except:
gc.collect()
time.sleep(60)
continue
df = df.reset_index(drop=True)
df['image_id'] = (df.index.values.astype(int)+start_id).astype(str)
df['image_id'] = df['image_id'].apply(lambda x: x + '.png')
return df
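# Usage sketch (assumption): 'images' is a placeholder output directory that must already
# exist; each loop requests 50 assets and sleeps 60 seconds, so even a small run takes minutes.
def _example_get_random_data(out_dir='images'):
    df_random = get_random_data(out_dir, num_loop=2, start_id=0)
    df_random.to_csv(os.path.join(out_dir, 'metadata.csv'), index=False)
    return df_random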
def get_collection_data(dir_name: str, target_collections: Optional[List[str]] = None,
is_test: bool = False) -> pd.DataFrame:
"""
    Get NFT data registered on OpenSea by using the OpenSea API.
    You can fetch a large amount of data from the collections you prefer. If you want to
    change the data to be acquired, see the OpenSea API reference and adjust 'params' and
    'get_features'.
    A delay of one request per minute is used to reduce the server load;
    change it according to your preference (within the bounds of common sense).
    If the fetched content is anything other than png, jpeg or still-image svg,
    that item is not saved (but the process continues).
    Parameters
    ----------
    dir_name : str
        Directory path to save images.
    target_collections : list of str
        The list of collection slugs you prefer.
        This variable can be set to None, but then you must set is_test=True.
    is_test : bool
        Fetch a small fixed set of collections regardless of target_collections
        if you set "is_test=True".
    Returns
    -------
    df : pd.DataFrame
        DataFrame of NFT metadata for the saved images, including their image ids.
    See Also
    --------
    get_features : list of str
        The original data has hundreds of columns, but only a limited number are kept.
        Change this according to your preference if column names you need are not included.
    params : dict of requests parameters
        Like get_features, change this according to your preference if you want to change
        the data being fetched.
"""
DATAPATH = dir_name
e_count = 0
e_collection = []
if is_test:
print("This function execute on test mode.")
print("Automatically set target_collections:\n['cryptopunks', 'boredapeyachtclub', 'doodles-official']")
target_collections = ['cryptopunks', 'boredapeyachtclub', 'doodles-official']
df = pd.DataFrame()
img_id = 0
url = "https://api.opensea.io/api/v1/assets"
for collection in target_collections:
for idx in tqdm(range(10), ascii=True, desc=collection):
try:
params = {
"offset": str(50*idx),
"order_by": "sale_date",
"order_direction": "desc",
"limit": "50",
"collection": collection
}
response = requests.get(url, params=params)
data = response.json()
assets_df = pd.json_normalize(data['assets'])
for feature in assets_df.columns.values:
if feature not in df.columns.values:
df[feature] = None
for feature in df.columns.values:
if feature not in assets_df.columns.values:
assets_df[feature] = None
for i in range(assets_df.shape[0]):
img_url = assets_df.iloc[i]['image_url']
                    img_url = img_url.replace(" ", "")
if is_image(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_png(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
elif is_svg(img_url):
file_name = os.path.join(DATAPATH, f"{img_id}.png")
save_svg(img_url, file_name)
df = df.append(assets_df.iloc[i])
img_id += 1
else:
continue
gc.collect() # Just in case, free the memory so that the process does not stop
time.sleep(60)
except:
e_count += 1
e_collection.append(collection)
gc.collect()
time.sleep(60)
continue
print(f"error count: {e_count}")
print(f"error collection: {list(set(e_collection))}")
df = df.reset_index(drop=True)
df['image_id'] = df.index.values.astype(str)
df['image_id'] = df['image_id'].apply(lambda x: x + '.png')
return df
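# Usage sketch (assumption): the collection slugs below are the same examples used by the
# test mode above; the output directory is a placeholder and must exist before the call.
def _example_get_collection_data(out_dir='collection_images'):
    df_coll = get_collection_data(out_dir,
                                  target_collections=['cryptopunks', 'doodles-official'])
    df_coll.to_csv(os.path.join(out_dir, 'metadata.csv'), index=False)
    return df_coll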
def get_data(asset_contract_address: str, token_id: str):
"""
Get the asset data.
Parameters
----------
asset_contract_address : str
The string of asset contract address.
token_id : str
The string of token id.
Returns
-------
orders_df : pd.DataFrame
The dataframe of asset data.
"""
if type(token_id) != str:
token_id = str(token_id)
url = f"https://api.opensea.io/api/v1/asset/{asset_contract_address}/{token_id}/"
response = requests.request("GET", url)
data = response.json()
asset_df = | pd.json_normalize(data) | pandas.json_normalize |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import joblib
import xgboost as xgb
import os
import tempfile
import oss2
import json
import pandas as pd
from sklearn import datasets
logger = logging.getLogger(__name__)
def extract_xgbooost_cluster_env():
"""
Extract the cluster env from pod
:return: the related cluster env to build rabit
"""
logger.info("starting to extract system env")
master_addr = os.environ.get("MASTER_ADDR", "{}")
master_port = int(os.environ.get("MASTER_PORT", "{}"))
rank = int(os.environ.get("RANK", "{}"))
world_size = int(os.environ.get("WORLD_SIZE", "{}"))
logger.info("extract the Rabit env from cluster :"
" %s, port: %d, rank: %d, word_size: %d ",
master_addr, master_port, rank, world_size)
return master_addr, master_port, rank, world_size
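# Sketch (assumption, not part of this file) of how the extracted cluster env is typically
# passed to the Rabit tracker. xgb.rabit.init existed in older xgboost releases; newer
# releases replaced it with xgb.collective, so adjust to your xgboost version.
def _example_setup_rabit():
    master_addr, master_port, rank, world_size = extract_xgbooost_cluster_env()
    rabit_args = [
        'DMLC_NUM_WORKER=%d' % world_size,
        'DMLC_TRACKER_URI=%s' % master_addr,
        'DMLC_TRACKER_PORT=%d' % master_port,
        'DMLC_TASK_ID=%d' % rank,
    ]
    xgb.rabit.init([arg.encode() for arg in rabit_args])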
def read_train_data(rank, num_workers, path):
"""
Read file based on the rank of worker.
We use the sklearn.iris data for demonstration
You can extend this to read distributed data source like HDFS, HIVE etc
:param rank: the id of each worker
:param num_workers: total number of workers in this cluster
:param path: the input file name or the place to read the data
:return: XGBoost Dmatrix
"""
iris = datasets.load_iris()
x = iris.data
y = iris.target
start, end = get_range_data(len(x), rank, num_workers)
x = x[start:end, :]
y = y[start:end]
x = pd.DataFrame(x)
y = pd.DataFrame(y)
dtrain = xgb.DMatrix(data=x, label=y)
logging.info("Read data from IRIS data source with range from %d to %d",
start, end)
return dtrain
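# get_range_data is called above but not shown in this excerpt; a plausible implementation
# (an assumption -- the original module defines its own helper) splits the rows evenly
# across workers, giving the last worker any remainder:
def _example_get_range_data(num_row, rank, num_workers):
    size = num_row // num_workers
    start = size * rank
    end = num_row if rank == num_workers - 1 else size * (rank + 1)
    return start, end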
def read_predict_data(rank, num_workers, path):
"""
Read file based on the rank of worker.
We use the sklearn.iris data for demonstration
You can extend this to read distributed data source like HDFS, HIVE etc
:param rank: the id of each worker
:param num_workers: total number of workers in this cluster
:param path: the input file name or the place to read the data
:return: XGBoost Dmatrix, and real value
"""
iris = datasets.load_iris()
x = iris.data
y = iris.target
start, end = get_range_data(len(x), rank, num_workers)
x = x[start:end, :]
y = y[start:end]
x = | pd.DataFrame(x) | pandas.DataFrame |
import tensorflow as tf
import pandas as pd
import readers
import main
import kmean as km
df= | pd.read_pickle("user_item_table_train.pkl") | pandas.read_pickle |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `gffpandas` package."""
import gffpandas.gffpandas as gff3pd
import pandas as pd
import os
written_df = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'region', 1, 4000, '.', '+', '.',
'Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type='
'genomic DNA;serovar=Typhimurium;strain=SL1344'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'CDS', 13, 235, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 600, '.', '-', '.',
'ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003'],
['NC_016810.1', 'RefSeq', 'CDS', 21, 345, '.', '-', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004'],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005'],
['NC_016810.1', 'RefSeq', 'CDS', 34, 335, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"])
written_header = ('##gff-version 3\n'
'##sequence-region NC_016810.1 1 20\n')
written_csv = ('seq_id,source,type,start,end,score,strand,phase,attributes\n'
'NC_016810.1,RefSeq,region,1,4000,.,+,.,Dbxref=taxon:216597;ID='
'id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;serovar='
'Typhimurium;strain=SL1344\n'
'NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene1;Name=thrL;gbkey='
'Gene;gene=thrL;locus_tag=SL1344_0001\n'
'NC_016810.1,RefSeq,CDS,13,235,.,+,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene2;Name=thrA;gbkey='
'Gene;gene=thrA;locus_tag=SL1344_0002\n'
'NC_016810.1,RefSeq,CDS,341,523,.,+,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1,RefSeq,gene,1,600,.,-,.,ID=gene3;Name=thrX;gbkey='
'Gene;gene=thrX;locus_tag=SL1344_0003\n'
'NC_016810.1,RefSeq,CDS,21,345,.,-,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1,RefSeq,gene,41,255,.,+,.,ID=gene4;Name=thrB;gbkey='
'Gene;gene=thrB;locus_tag=SL1344_0004\n'
'NC_016810.1,RefSeq,CDS,61,195,.,+,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1,RefSeq,gene,170,546,.,+,.,ID=gene5;Name=thrC;gbkey'
'=Gene;gene=thrC;locus_tag=SL1344_0005\n'
'NC_016810.1,RefSeq,CDS,34,335,.,+,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n')
written_tsv = ('seq_id\tsource\ttype\tstart\tend\tscore\tstrand\tphase\t'
'attributes\n'
'NC_016810.1\tRefSeq\tregion\t1\t4000\t.\t+\t.\tDbxref=taxon:21'
'6597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;'
'serovar=Typhimurium;strain=SL1344\n'
'NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene1;Name=thrL;'
'gbkey=Gene;gene=thrL;locus_tag=SL1344_0001\n'
'NC_016810.1\tRefSeq\tCDS\t13\t235\t.\t+\t0\tDbxref=UniProtKB%2'
'52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051'
'79941.1;Parent=gene1;gbkey=CDS;product=thr operon leader '
'peptide;protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene2;Name=thrA;'
'gbkey=Gene;gene=thrA;locus_tag=SL1344_0002\n'
'NC_016810.1\tRefSeq\tCDS\t341\t523\t.\t+\t0\tDbxref=UniProtKB%'
'252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_005'
'179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader '
'peptide;protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1\tRefSeq\tgene\t1\t600\t.\t-\t.\tID=gene3;Name=thrX'
';gbkey=Gene;gene=thrX;locus_tag=SL1344_0003\n'
'NC_016810.1\tRefSeq\tCDS\t21\t345\t.\t-\t0\tDbxref=UniProtKB%2'
'52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051'
'79941.1;Parent=gene3;gbkey=CDS;product=thr operon leader '
'peptide;protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1\tRefSeq\tgene\t41\t255\t.\t+\t.\tID=gene4;Name='
'thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004\n'
'NC_016810.1\tRefSeq\tCDS\t61\t195\t.\t+\t0\tDbxref=UniProtKB%2'
'52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051'
'79941.1;Parent=gene4;gbkey=CDS;product=thr operon leader '
'peptide;protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1\tRefSeq\tgene\t170\t546\t.\t+\t.\tID=gene5;Name='
'thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005\n'
'NC_016810.1\tRefSeq\tCDS\t34\t335\t.\t+\t0\tDbxref=UniProt'
'KB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name='
'YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon '
'leader peptide;protein_id=YP_005179941.1;transl_table=11\n')
written_gff = ('##gff-version 3\n'
'##sequence-region NC_016810.1 1 20\n'
'NC_016810.1 RefSeq region 1 4000 . +'
' . Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=ge'
'nomic;mol_type=genomic DNA;serovar=Typhimurium;strain=SL1344\n'
'NC_016810.1 RefSeq gene 1 20 . +'
' . ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_'
'tag=SL1344_0001\n'
'NC_016810.1 RefSeq CDS 13 235 . +'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene1;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n'
'NC_016810.1 RefSeq gene 1 20 . +'
' . ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_'
'tag=SL1344_0002\n'
'NC_016810.1 RefSeq CDS 341 523 . +'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene2;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n'
'NC_016810.1 RefSeq gene 1 600 . -'
' . ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_'
'tag=SL1344_0003\n'
'NC_016810.1 RefSeq CDS 21 345 . -'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene3;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n'
'NC_016810.1 RefSeq gene 41 255 . +'
' . ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_'
'tag=SL1344_0004\n'
'NC_016810.1 RefSeq CDS 61 195 . +'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene4;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n'
'NC_016810.1 RefSeq gene 170 546 . +'
' . ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_'
'tag=SL1344_0005\n'
'NC_016810.1 RefSeq CDS 34 335 . +'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene5;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n')
written_filtered_length = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'CDS', 13, 235, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004'],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"],
index=[1, 2, 3, 4, 7, 8])
compare_get_feature_by_attribute = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
['NC_016810.1', 'RefSeq', 'gene', 1, 600, '.', '-', '.',
'ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"],
index=[1, 3, 5, 7, 9])
compare_get_feature_by_attribute2 = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'CDS', 21, 345, '.', '-', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"],
index=[4, 6, 8])
written_attribute_df = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'region', 1, 4000, '.', '+', '.',
'Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic'
' DNA;serovar=Typhimurium;strain=SL1344',
'taxon:216597', 'id0', None, None, 'Src', None, 'genomic',
None, 'genomic DNA', None, None, 'Typhimurium', 'SL1344',
None],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001',
None, 'gene1', 'thrL', None, 'Gene', 'thrL', None,
'SL1344_0001', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 13, 235, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1',
'cds0', 'YP_005179941.1', 'gene1', 'CDS', None, None,
None, None, 'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002',
None, 'gene2', 'thrA', None, 'Gene', 'thrA', None,
'SL1344_0002', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1', 'cds0',
'YP_005179941.1', 'gene2', 'CDS', None, None, None, None,
'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 600, '.', '-', '.',
'ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003',
None, 'gene3', 'thrX', None, 'Gene', 'thrX', None,
'SL1344_0003', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 21, 345, '.', '-', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1', 'cds0',
'YP_005179941.1', 'gene3', 'CDS', None, None, None, None,
'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004',
None, 'gene4', 'thrB', None, 'Gene', 'thrB', None,
'SL1344_0004', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1', 'cds0',
'YP_005179941.1', 'gene4', 'CDS', None, None, None, None,
'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005',
None, 'gene5', 'thrC', None, 'Gene', 'thrC', None,
'SL1344_0005', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 34, 335, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1', 'cds0',
'YP_005179941.1', 'gene5', 'CDS', None, None, None, None,
'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes", "Dbxref",
"ID", "Name", "Parent", "gbkey", "gene", "genome",
"locus_tag", "mol_type", "product", "protein_id",
"serovar", "strain", "transl_table"])
strand_counts = pd.value_counts(written_df['strand']).to_dict()
type_counts = pd.value_counts(written_df['type']).to_dict()
compare_stats_dic = {
'Maximal_bp_length':
599,
'Minimal_bp_length':
19,
'Counted_strands':
strand_counts,
'Counted_feature_types':
type_counts
}
df_empty = pd.DataFrame({}, columns=["seq_id", "source", "type", "start",
"end", "score", "strand", "phase",
"attributes"], index=[])
redundant_entry = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
], columns=["seq_id", "source", "type", "start", "end", "score",
"strand", "phase", "attributes"],
index=[3])
compare_filter_feature_df = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
['NC_016810.1', 'RefSeq', 'gene', 1, 600, '.', '-', '.',
'ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"],
index=[1, 3, 5, 7, 9])
compare_overlap_gene_1_40 = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
], columns=["seq_id", "source", "type", "start", "end", "score",
"strand", "phase", "attributes"],
index=[1, 3])
compare_overlap_40_300 = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'region', 1, 4000, '.', '+', '.',
'Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA'
';serovar=Typhimurium;strain=SL1344'],
['NC_016810.1', 'RefSeq', 'CDS', 13, 235, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004'],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005'],
['NC_016810.1', 'RefSeq', 'CDS', 34, 335, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end", "score",
"strand", "phase", "attributes"],
index=[0, 2, 7, 8, 9, 10])
compare_overlap_170_171 = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 600, '.', '-', '.',
'ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003'],
['NC_016810.1', 'RefSeq', 'CDS', 21, 345, '.', '-', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end", "score",
"strand", "phase", "attributes"],
index=[5, 6])
compare_overlap_525_545 = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'region', 1, 4000, '.', '+', '.',
'Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA'
';serovar=Typhimurium;strain=SL1344'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005'],
], columns=["seq_id", "source", "type", "start", "end", "score",
"strand", "phase", "attributes"],
index=[0, 9])
compare_overlap_341_500 = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'region', 1, 4000, '.', '+', '.',
'Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA'
';serovar=Typhimurium;strain=SL1344'],
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader pep'
'tide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005'],
], columns=["seq_id", "source", "type", "start", "end", "score",
"strand", "phase", "attributes"],
index=[0, 4, 9])
compare_complement = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end", "score",
"strand", "phase", "attributes"],
index=[1, 3, 4])
def generate_gff3_df():
read_in_file = gff3pd.read_gff3('fixtures/test_file.gff')
return read_in_file
def test_read_gff3_if_df_type():
gff3_df = generate_gff3_df()
assert type(gff3_df) == gff3pd.Gff3DataFrame
| pd.testing.assert_frame_equal(gff3_df.df, written_df) | pandas.testing.assert_frame_equal |
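# --- Illustrative addition by the editor (not part of the original test-suite) ---
# A natural companion check, assuming the gff3pd API used above: the parsed frame
# should expose the nine standard GFF3 columns that every fixture in this file uses.
def test_read_gff3_has_expected_columns():
    gff3_df = generate_gff3_df()
    expected_columns = ["seq_id", "source", "type", "start", "end",
                        "score", "strand", "phase", "attributes"]
    assert list(gff3_df.df.columns) == expected_columns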
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import pandas as pd
from enum import Enum
from pydash import get
from typing import List, Optional, Union
from gs_quant.api.gs.assets import AssetClass, GsAssetApi
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.monitors import GsMonitorsApi
from gs_quant.base import EnumBase
from gs_quant.datetime.date import prev_business_date
from gs_quant.session import GsSession
from gs_quant.target.data import DataQuery
class BasketType(EnumBase, Enum):
""" Basket Types """
CUSTOM_BASKET = 'Custom Basket'
RESEARCH_BASKET = 'Research Basket'
def __repr__(self):
return self.value
@classmethod
def to_list(cls):
return [basket_type.value for basket_type in cls]
class CorporateActionType(EnumBase, Enum):
""" Different types of corporate actions """
ACQUISITION = 'Acquisition'
CASH_DIVIDEND = 'Cash Dividend'
IDENTIFIER_CHANGE = 'Identifier Change'
RIGHTS_ISSUE = 'Rights Issue'
SHARE_CHANGE = 'Share Change'
SPECIAL_DIVIDEND = 'Special Dividend'
SPIN_OFF = 'Spin Off'
STOCK_DIVIDEND = 'Stock Dividend'
STOCK_SPLIT = 'Stock Split'
def __repr__(self):
return self.value
@classmethod
def to_list(cls):
return [ca_type.value for ca_type in cls]
class CustomBasketStyles(EnumBase, Enum):
""" Styles for Custom Baskets """
ASIA = 'Asia'
BESPOKE = 'Bespoke'
CONSUMER = 'Consumer'
EM = 'EM'
ENERGY = 'Energy'
ESG = 'ESG'
ETF_REPLICATION = 'ETF Replication'
EU = 'EU'
FACTORS = 'Factors'
FINANCIALS = 'Financials'
FUNDAMENTAL = 'Fundamental'
GEOGRAPHIC = 'Geographic'
GLOBAL = 'Global'
GROWTH = 'Growth'
HEALTHCARE = 'Healthcare'
INDUSTRIALS = 'Industrials'
INDUSTRY = 'Sub-Sectors / Smart Industries'
MACRO = 'Macro'
MACRO_SECTORS = 'Macro Sectors'
MATERIALS = 'Materials'
MICRO_SECTORS = 'Micro Sectors'
MOMENTUM = 'Momentum'
PIPG = 'PIPG'
POSITIONING = 'Positioning'
SINGLE_STOCKS = 'Single Stocks'
SIZE = 'Size'
STRUCTURED_1_DELTA = 'Structured 1 Delta'
THEMATIC = 'Thematic'
TMT = 'TMT'
TOPICAL = 'Topical'
US = 'US'
UTILITIES = 'Utilities'
VALUE = 'Value'
VOLATILITY = 'Volatility'
def __repr__(self):
return self.value
class FundamentalsMetrics(EnumBase, Enum):
""" Metric for the associated asset """
DIVIDEND_YIELD = 'Dividend Yield'
EARNINGS_PER_SHARE = 'Earnings per Share'
EARNINGS_PER_SHARE_POSITIVE = 'Earnings per Share Positive'
NET_DEBT_TO_EBITDA = 'Net Debt to EBITDA'
PRICE_TO_BOOK = 'Price to Book'
PRICE_TO_CASH = 'Price to Cash'
PRICE_TO_EARNINGS = 'Price to Earnings'
PRICE_TO_EARNINGS_POSITIVE = 'Price to Earnings Positive'
PRICE_TO_SALES = 'Price to Sales'
RETURN_ON_EQUITY = 'Return on Equity'
SALES_PER_SHARE = 'Sales per Share'
def __repr__(self):
return self.value
@classmethod
def to_list(cls):
return [metric.value for metric in cls]
class FundamentalMetricPeriod(EnumBase, Enum):
""" Period for the relevant metric """
ONE_YEAR = '1y'
TWO_YEARS = '2y'
THREE_YEARS = '3y'
def __repr__(self):
return self.value
class FundamentalMetricPeriodDirection(EnumBase, Enum):
""" Direction of the outlook period """
FORWARD = 'forward'
TRAILING = 'trailing'
def __repr__(self):
return self.value
class IndicesDatasets(EnumBase, Enum):
""" Indices Datasets """
BASKET_FUNDAMENTALS = 'BASKET_FUNDAMENTALS'
CORPORATE_ACTIONS = 'CA'
GIRBASKETCONSTITUENTS = 'GIRBASKETCONSTITUENTS'
GSBASKETCONSTITUENTS = 'GSBASKETCONSTITUENTS'
GSCB_FLAGSHIP = 'GSCB_FLAGSHIP'
def __repr__(self):
return self.value
class Region(EnumBase, Enum):
""" Region of the index """
AMERICAS = 'Americas'
ASIA = 'Asia'
EM = 'EM'
EUROPE = 'Europe'
GLOBAL = 'Global'
def __repr__(self):
return self.value
class ResearchBasketStyles(EnumBase, Enum):
""" Styles for Research Baskets """
ASIA_EX_JAPAN = 'Asia ex-Japan'
EQUITY_THEMATIC = 'Equity Thematic'
EUROPE = 'Europe'
FUND_OWNERSHIP = 'Fund Ownership'
FUNDAMENTALS = 'Fundamentals'
FX_OIL = 'FX/Oil'
GEOGRAPHICAL_EXPOSURE = 'Geographical Exposure'
HEDGE_FUND = 'Hedge Fund'
IP_FACTORS = 'Investment Profile (IP) Factors'
JAPAN = 'Japan'
MACRO = 'Macro'
MACRO_SLICE_STYLES = 'Macro Slice/Styles'
MUTUAL_FUND = 'Mutual Fund'
POSITIONING = 'Positioning'
PORTFOLIO_STRATEGY = 'Portfolio Strategy'
RISK_AND_LIQUIDITY = 'Risk & Liquidity'
SECTOR = 'Sector'
SHAREHOLDER_RETURN = 'Shareholder Return'
STYLE_FACTOR_AND_FUNDAMENTAL = 'Style, Factor and Fundamental'
STYLES_THEMES = 'Style/Themes'
TACTICAL_RESEARCH = 'Tactical Research'
THEMATIC = 'Thematic'
US = 'US'
WAVEFRONT_COMPONENTS = 'Wavefront Components'
WAVEFRONT_PAIRS = 'Wavefront Pairs'
WAVEFRONTS = 'Wavefronts'
def __repr__(self):
return self.value
class ReturnType(EnumBase, Enum):
""" Determines the index calculation methodology with respect to dividend reinvestment """
GROSS_RETURN = 'Gross Return'
PRICE_RETURN = 'Price Return'
TOTAL_RETURN = 'Total Return'
def __repr__(self):
return self.value
class WeightingStrategy(EnumBase, Enum):
""" Strategy used to price the index's position set """
EQUAL = 'Equal'
MARKET_CAPITALIZATION = 'Market Capitalization'
QUANTITY = 'Quantity'
WEIGHT = 'Weight'
def __repr__(self):
return self.value
def get_my_baskets(user_id: str = None) -> Optional[pd.DataFrame]:
"""
Retrieve a list of baskets a user is permissioned to
:param user_id: Marquee user/app ID (default is current application's id)
:return: dataframe of baskets user has access to
**Usage**
Retrieve a list of baskets a user is permissioned to
**Examples**
Retrieve a list of baskets the current user is permissioned to
>>> from gs_quant.markets.indices_utils import *
>>>
>>> get_my_baskets()
"""
user_id = user_id if user_id is not None else GsSession.current.client_id
tag = f'Custom Basket:{user_id}'
response = GsMonitorsApi.get_monitors(tags=tag)
if len(response):
row_groups = get(response, '0.parameters.row_groups')
my_baskets = []
for row_group in row_groups:
entity_ids = [entity.id for entity in row_group.entity_ids]
baskets = GsAssetApi.get_many_assets_data(id=entity_ids, fields=['id', 'ticker', 'name', 'liveDate'])
my_baskets += [dict(monitor_name=row_group.name, id=get(basket, 'id'), ticker=get(basket, 'ticker'),
name=get(basket, 'name'), live_date=get(basket, 'liveDate')) for basket in baskets]
return pd.DataFrame(my_baskets)
def get_flagship_baskets(fields: [str] = [],
basket_type: List[BasketType] = BasketType.to_list(),
asset_class: List[AssetClass] = [AssetClass.Equity],
region: List[Region] = None,
styles: List[Union[CustomBasketStyles, ResearchBasketStyles]] = None,
as_of: dt.datetime = None) -> pd.DataFrame:
"""
Retrieve flagship baskets
:param fields: Fields to retrieve in addition to mqid, name, ticker, region, basket type, \
description, styles, live date, and asset class
:param basket_type: Basket type(s)
:param asset_class: Asset class (defaults to Equity)
:param region: Basket region(s)
:param styles: Basket style(s)
:param as_of: Datetime for which to retrieve baskets (defaults to current time)
:return: flagship baskets
**Usage**
Retrieve a list of flagship baskets
**Examples**
Retrieve a list of flagship baskets
>>> from gs_quant.markets.indices_utils import *
>>>
>>> get_flagship_baskets()
**See also**
:func:`get_flagships_with_assets` :func:`get_flagships_performance` :func:`get_flagships_constituents`
"""
fields = list(set(fields).union(set(['id', 'name', 'ticker', 'region', 'type', 'description',
'styles', 'liveDate', 'assetClass'])))
query = dict(fields=fields, type=basket_type, asset_class=asset_class, is_pair_basket=[False], flagship=[True])
if region is not None:
query.update(region=region)
if styles is not None:
query.update(styles=styles)
response = GsAssetApi.get_many_assets_data_scroll(**query, as_of=as_of, limit=2000, scroll='1m')
return pd.DataFrame(response)
def get_flagships_with_assets(identifiers: List[str],
fields: [str] = [],
basket_type: List[BasketType] = BasketType.to_list(),
asset_class: List[AssetClass] = [AssetClass.Equity],
region: List[Region] = None,
styles: List[Union[CustomBasketStyles, ResearchBasketStyles]] = None,
as_of: dt.datetime = None) -> pd.DataFrame:
"""
Retrieve a list of flagship baskets containing specified assets
:param identifiers: List of asset identifiers
:param fields: Fields to retrieve in addition to mqid, name, ticker, region, basket type, \
description, styles, live date, and asset class
:param basket_type: Basket type(s)
:param asset_class: Asset class (defaults to Equity)
:param region: Basket region(s)
:param styles: Basket style(s)
:param as_of: Datetime for which to retrieve baskets (defaults to current time)
:return: flagship baskets containing specified assets
**Usage**
Retrieve a list of flagship baskets containing specified assets
**Examples**
Retrieve a list of flagship custom baskets containing 'AAPL UW' single stock
>>> from gs_quant.markets.indices_utils import *
>>>
>>> get_flagships_with_assets(identifiers=['AAPL UW'], basket_type=[BasketType.CUSTOM_BASKET])
**See also**
:func:`get_flagship_baskets` :func:`get_flagships_performance` :func:`get_flagships_constituents`
"""
fields = list(set(fields).union(set(['id', 'name', 'ticker', 'region', 'type', 'description',
'styles', 'liveDate', 'assetClass'])))
response = GsAssetApi.resolve_assets(identifier=identifiers, fields=['id'], limit=1)
mqids = [get(asset, '0.id') for asset in response.values()]
query = dict(fields=fields, type=basket_type, asset_class=asset_class, is_pair_basket=[False],
flagship=[True], underlying_asset_ids=mqids)
if region is not None:
query.update(region=region)
if styles is not None:
query.update(styles=styles)
response = GsAssetApi.get_many_assets_data_scroll(**query, as_of=as_of, limit=2000, scroll='1m')
return pd.DataFrame(response)
def get_flagships_performance(fields: [str] = [],
basket_type: List[BasketType] = BasketType.to_list(),
asset_class: List[AssetClass] = [AssetClass.Equity],
region: List[Region] = None,
styles: List[Union[CustomBasketStyles, ResearchBasketStyles]] = None,
start: dt.date = None,
end: dt.date = None,) -> pd.DataFrame:
"""
Retrieve performance data for flagship baskets
:param fields: Fields to retrieve in addition to bbid, mqid, name, region, basket type, \
styles, live date, and asset class
:param basket_type: Basket type(s)
:param asset_class: Asset class (defaults to Equity)
:param region: Basket region(s)
:param styles: Basket style(s)
:param start: Date for which to retrieve pricing (defaults to previous business day)
:param end: Date for which to retrieve pricing (defaults to previous business day)
:return: pricing data for flagship baskets
**Usage**
Retrieve performance data for flagship baskets
**Examples**
Retrieve performance data for flagship Asia custom baskets
>>> from gs_quant.markets.indices_utils import *
>>>
>>> get_flagships_performance(basket_type=[BasketType.CUSTOM_BASKET], region=[Region.ASIA])
**See also**
:func:`get_flagships_with_assets` :func:`get_flagship_baskets` :func:`get_flagships_constituents`
"""
start = start or prev_business_date()
end = end or prev_business_date()
fields = list(set(fields).union(set(['name', 'region', 'type', 'flagship', 'isPairBasket',
'styles', 'liveDate', 'assetClass'])))
coverage = GsDataApi.get_coverage(dataset_id=IndicesDatasets.GSCB_FLAGSHIP.value, fields=fields)
basket_regions = [] if region is None else [r.value for r in region]
basket_styles = [] if styles is None else [s.value for s in styles]
basket_types = [b_type.value for b_type in basket_type]
baskets_map = {}
for basket in coverage:
if get(basket, 'flagship') is False or get(basket, 'isPairBasket') is True or \
region is not None and get(basket, 'region') not in basket_regions or \
get(basket, 'type') not in basket_types or \
get(basket, 'assetClass') not in [a.value for a in asset_class] or \
styles is not None and not any(s in get(basket, 'styles', []) for s in basket_styles):
continue
baskets_map[get(basket, 'assetId')] = basket
response = GsDataApi.query_data(query=DataQuery(where=dict(assetId=list(baskets_map.keys())),
startDate=start, endDate=end), dataset_id=IndicesDatasets.GSCB_FLAGSHIP.value)
performance = []
for basket in response:
basket_data = baskets_map[get(basket, 'assetId')]
basket_data.update(closePrice=get(basket, 'closePrice'))
basket_data.update(date=get(basket, 'date'))
performance.append(basket_data)
return | pd.DataFrame(performance) | pandas.DataFrame |
# coding:utf-8
# Time-series forecasting with ARMA
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARMA
from statsmodels.graphics.api import qqplot
# Create the data series
data = [5922, 5308, 5546, 5975, 2704, 1767, 4111, 5542, 4726, 5866, 6183, 3199, 1471, 1325, 6618, 6644, 5337, 7064, 2912, 1456, 4705, 4579, 4990, 4331, 4481, 1813, 1258, 4383, 5451, 5169, 5362, 6259, 3743, 2268, 5397, 5821, 6115, 6631, 6474, 4134, 2728, 5753, 7130, 7860, 6991, 7499, 5301, 2808, 6755, 6658, 7644, 6472, 8680, 6366, 5252, 8223, 8181, 10548, 11823, 14640, 9873, 6613, 14415, 13204, 14982, 9690, 10693, 8276, 4519, 7865, 8137, 10022, 7646, 8749, 5246, 4736, 9705, 7501, 9587, 10078, 9732, 6986, 4385, 8451, 9815, 10894, 10287, 9666, 6072, 5418]
data= | pd.Series(data) | pandas.Series |
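# --- Editor's sketch (not in the original snippet) ---
# The header comment promises an ARMA forecast, but the snippet stops after building
# the Series.  A minimal continuation using the legacy statsmodels ARMA API imported
# above could look like this; the (7, 0) order is an assumed example, not a tuned choice.
arma_model = ARMA(data, order=(7, 0)).fit(disp=0)
print(arma_model.aic, arma_model.bic, arma_model.hqic)    # information criteria
qqplot(arma_model.resid, line='q', fit=True)              # residual diagnostics
plt.show()
forecast = arma_model.predict(start=len(data), end=len(data) + 9)  # 10 steps ahead
print(forecast)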
from src.typeDefs.wbesPxTableRecord import ISection_2_2, IWbesPxHeaders, IWbesPxTableRecord
import datetime as dt
from src.repos.metricsData.metricsDataRepo import MetricsDataRepo
import pandas as pd
def fetchWbesPxTableContext(appDbConnStr: str, startDt: dt.datetime, endDt: dt.datetime) -> ISection_2_2:
mRepo = MetricsDataRepo(appDbConnStr)
# get iex Px data for the range between start date and end date
wbesPxIexVals = mRepo.getWbesPxIexBlockWiseData(startDt, endDt)
wbesPxPxiVals = mRepo.getWbesPxPxiBlockWiseData(startDt, endDt)
wbesPxIexDf = pd.DataFrame(wbesPxIexVals)
wbesPxPxiDf = pd.DataFrame(wbesPxPxiVals)
wbesPxIexTableDf = wbesPxIexDf.groupby(['time_stamp', 'beneficiary', 'beneficiary_type']).sum()
wbesPxPxiTableDf = wbesPxPxiDf.groupby(['time_stamp', 'beneficiary', 'beneficiary_type']).sum()
wbesPxIexTableDf = wbesPxIexTableDf.rename(columns={'data_value': 'px_iex_data'})
wbesPxIexTableDf.reset_index(inplace = True)
index_names = wbesPxIexTableDf[wbesPxIexTableDf['beneficiary_type'] == 'path'].index
wbesPxIexTableDf.drop(index_names, inplace = True)
index_names = wbesPxIexTableDf[wbesPxIexTableDf['beneficiary'] == 'West '].index
wbesPxIexTableDf.drop(index_names, inplace = True)
wbesPxIexTableDf.reset_index(inplace = True)
for itr in range(len(wbesPxIexTableDf)):
if wbesPxIexTableDf['beneficiary_type'][itr] == ' Injection ':
wbesPxIexTableDf['beneficiary_type'][itr] = 'Sell'
wbesPxIexTableDf['px_iex_data'][itr] = -1*(wbesPxIexTableDf['px_iex_data'][itr])
if wbesPxIexTableDf['beneficiary_type'][itr] == ' Drawal ':
wbesPxIexTableDf['beneficiary_type'][itr] = 'Buy'
wbesPxIexTableDf['beneficiary_name'] = wbesPxIexTableDf.beneficiary.str.cat(wbesPxIexTableDf.beneficiary_type,sep=" ")
wbesPxIexTableDf.drop(['index', 'beneficiary_type', 'beneficiary'],axis=1,inplace=True)
wbesPxPxiTableDf = wbesPxPxiTableDf.rename(columns={'data_value': 'px_pxi_data'})
wbesPxPxiTableDf.reset_index(inplace = True)
index_names = wbesPxPxiTableDf[wbesPxPxiTableDf['beneficiary_type'] == 'path'].index
wbesPxPxiTableDf.drop(index_names, inplace = True)
index_names = wbesPxPxiTableDf[wbesPxPxiTableDf['beneficiary'] == 'West '].index
wbesPxPxiTableDf.drop(index_names, inplace = True)
wbesPxPxiTableDf.reset_index(inplace = True)
for itr in range(len(wbesPxPxiTableDf)):
if wbesPxPxiTableDf['beneficiary_type'][itr] == ' Injection ':
wbesPxPxiTableDf['beneficiary_type'][itr] = 'Sell'
wbesPxPxiTableDf['px_pxi_data'][itr] = -1*(wbesPxPxiTableDf['px_pxi_data'][itr])
if wbesPxPxiTableDf['beneficiary_type'][itr] == ' Drawal ':
wbesPxPxiTableDf['beneficiary_type'][itr] = 'Buy'
wbesPxPxiTableDf['beneficiary_name'] = wbesPxPxiTableDf.beneficiary.str.cat(wbesPxPxiTableDf.beneficiary_type,sep=" ")
wbesPxPxiTableDf.drop(['index', 'beneficiary_type', 'beneficiary'],axis=1,inplace=True)
# testing
testPxIex = wbesPxIexTableDf
testPxPxi = wbesPxPxiTableDf
testPxPxi = testPxPxi.rename(columns={'px_pxi_data': 'data_value'})
testPxIex = testPxIex.rename(columns={'px_iex_data': 'data_value'})
testPxIex = testPxIex.append(testPxPxi, ignore_index=True)
testPxIex = testPxIex.groupby(['time_stamp', 'beneficiary_name']).sum()
testPxIex.reset_index(inplace = True)
testPxIex['data_value'] = testPxIex['data_value']/4
testPxIex['data_value'] = testPxIex['data_value'].astype(int)
testPxIex['time_stamp'] = | pd.to_datetime(testPxIex['time_stamp']) | pandas.to_datetime |
#For this code to run, KaggleSubmissionFile10836.csv must be supplied as the input file.
#The concept: the best-scoring submission file is fed back to the model as training data along with the actual train.csv
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import math
import numpy as ndarray
import xgboost as xgb
from sklearn import linear_model
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Ridge
from sklearn.linear_model import RidgeCV
from sklearn.cross_validation import KFold
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import StratifiedKFold
from sklearn.kernel_ridge import KernelRidge
#from sklearn.cross_validation import GridSearchCV
from keras.wrappers.scikit_learn import KerasRegressor
from keras.models import Sequential
from keras.layers import Dense
from scipy.stats import norm
#from scipy.weave import inline
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVR
from sklearn import svm
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn import ensemble
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.decomposition.pca import PCA
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNetCV
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA as sklearnPCA
from sklearn import decomposition, ensemble
from sklearn import utils
from sklearn import preprocessing
from sklearn import tree
from scipy import stats
from sklearn import neighbors
from scipy.stats import skew
import matplotlib.pyplot as plt
from scipy.stats import skew
import xgboost as xgb
from sklearn.cross_validation import KFold
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, Lasso
from math import sqrt
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
TARGET = 'SalePrice'
NFOLDS = 4
SEED = 0
NROWS = None
# SUBMISSION_FILE = '../input/sample_submission.csv'
SUBMISSION_FILE = 'C:/Users/<NAME>ab/submissions.csv'
train = pd.read_csv("C:/Users/<NAME>/Desktop/kaggle/train.csv")
train= train[train["GrLivArea"] < 4000]
test_read = pd.read_csv("C:/Users/<NAME>/Desktop/kaggle/test.csv")
#visualisations
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0,800000));
#scatter plot totalbsmtsf/saleprice
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0,800000));
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
var = 'YearBuilt'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(16, 8))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
plt.xticks(rotation=90);
plt.show()
####################################################FINDING THE ORDER OF IMPORTANCE OF THE VARIABLES ON SALE PRICE####################################################
corr=train.corr()["SalePrice"]
corr[np.argsort(corr, axis=0)[::-1]]
#correlation matrix
corrmat = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True);
k = 10 #number of variables for heatmap
cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
#scatterplot
sns.set()
cols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']
sns.pairplot(train[cols], size = 2.5)
plt.show()
#reading in the test predictions set from the best score
test_pred = pd.read_csv("D:\KaggleSubmissionFile10836.csv")
#importing the best score to be compared with the results
y_final_read = pd.read_csv("D:\KaggleSubmissionFile10836.csv")
y_final = y_final_read["SalePrice"]
#combined dataset
test_read['SalePrice'] = test_pred["SalePrice"]
test_read.loc[test_read['MSSubClass'] == 20, 'MSZoning'] = 'RL'
test_read.loc[test_read['MSSubClass'] == 30, 'MSZoning'] = 'RM'
test_read.loc[test_read['MSSubClass'] == 70, 'MSZoning'] = 'RM'
test_read.loc[test_read['SaleType'].isnull(), 'SaleType'] = 'WD'
test_read.loc[test_read['BsmtFinType1'].isnull(), 'TotalBsmtSF'] = 0
test_read.loc[666, "GarageQual"] = "TA"
test_read.loc[666, "GarageCond"] = "TA"
test_read.loc[666, "GarageFinish"] = "Unf"
test_read.loc[666, "GarageYrBlt"] = "1980"
test_read.loc[1116, "GarageType"] = np.nan
X = pd.concat( (train.loc[:,'MSSubClass':'SalePrice'],test_read.loc[:,'MSSubClass':'SalePrice']) ,ignore_index=True)
predictors = X.loc[:,'MSSubClass':'SaleCondition']
# Looking at categorical values
def cat_exploration(column):return X[column].value_counts()
#Imputing the missing values
def cat_imputation(column, value):X.loc[X[column].isnull(),column] = value
#missing data #train dataset # test dataset
def show_missing():
missing = X.columns[X.isnull().any()].tolist()
return missing
# X["LotFrontage"] = X.groupby("Neighborhood").transform(lambda x: x.fillna(x.mean()))
X.loc[X.LotFrontage.isnull(), 'LotFrontage'] = X.groupby('Neighborhood').LotFrontage.transform('median')
X["CentralAir"] = (X["CentralAir"] == "Y") * 1.0
X.loc[X['Alley'].isnull(), 'Alley'] = 'No alley access'
X.loc[:, "KitchenQual"] = X.loc[:, "KitchenQual"].fillna("TA")
del X['1stFlrSF']
del X['2ndFlrSF']
del X['LowQualFinSF']
# only three values not matching for train and many for test
# del X['Neighborhood']
# Seem to have similar values related to MasVnrType
del X['MasVnrType']
del X['BsmtFullBath']
del X['BsmtHalfBath']
del X['HalfBath']
del X['Foundation']
del X['MSSubClass']
del X['MSZoning']
cat_imputation('MasVnrArea', 'None')
cat_imputation('PoolQC','None')
cat_imputation('Fence', 'None')
cat_imputation('MiscFeature', 'None')
cat_imputation('Electrical','SBrkr')
cat_imputation('FireplaceQu','None')
X["Is_Electrical_SBrkr"] = (X["Electrical"] == "SBrkr") * 1
basement_cols=['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2','BsmtFinSF1','BsmtFinSF2']
X[basement_cols][X['BsmtQual'].isnull()==True]
for cols in basement_cols:
if 'FinSF'not in cols: cat_imputation(cols,'None')
del X['BsmtFinSF1']
del X['BsmtFinSF2']
del X['BsmtUnfSF']
garage_cols=['GarageType','GarageQual','GarageCond','GarageYrBlt','GarageFinish','GarageCars','GarageArea']
X[garage_cols][X['GarageType'].isnull()==True]
for cols in garage_cols:
if X[cols].dtype==np.object:
cat_imputation(cols,'None')
else:
cat_imputation(cols, 0)
X['HasBsmt'] = pd.Series(len(X['TotalBsmtSF']), index=X.index)
X['HasBsmt'] = 0
X.loc[X['TotalBsmtSF']>0,'HasBsmt'] = 1
X.loc[X['HasBsmt']==1,'TotalBsmtSF'] = np.log(X['TotalBsmtSF'])
quality_dict = {"None": 0, "Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}
X["ExterQual"] = X["ExterQual"].map(quality_dict).astype(int)
X["ExterCond"] = X["ExterCond"].map(quality_dict).astype(int)
X["BsmtQual"] = X["BsmtQual"].map(quality_dict).astype(int)
X["BsmtCond"] = X["BsmtCond"].map(quality_dict).astype(int)
X["HeatingQC"] = X["HeatingQC"].map(quality_dict).astype(int)
X["KitchenQual"] = X["KitchenQual"].map(quality_dict).astype(int)
X["FireplaceQu"] = X["FireplaceQu"].map(quality_dict).astype(int)
X["GarageQual"] = X["GarageQual"].map(quality_dict).astype(int)
X["GarageCond"] = X["GarageCond"].map(quality_dict).astype(int)
# cat_imputation('LotFrontage',(X['LotArea']/2)**(.5))
X[show_missing()].isnull().sum()
#applying log transformation
X['SalePrice'] = np.log(X['SalePrice'])
#data transformation
X['GrLivArea'] = np.log(X['GrLivArea'])
X["Aggregate_OverallQual"] = X.OverallQual.replace( {1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2, 7 : 3, 8 : 3, 9 : 3, 10 : 3})
X["New_House"] = (X["YearRemodAdd"] == X["YrSold"]) * 1
#convert categorical variable into dummy
#good-way to convert categorical variable into dummy without increasing the column numbers
# X = X.replace({
# "MSZoning":{"A":1,"C (all)":2, "FV":3, "I":4, "RH":5, "RL":6, "RP":7, "RM":8},
# "Street":{"Grvl":1,"Pave":2},
# "LotShape":{"Reg":1,"IR1":2, "IR2":3, "IR3":4},
# "LandContour":{"Lvl":1,"Bnk":2, "HLS":3, "Low":4},
# "Utilities":{"AllPub":1,"NoSewr":2, "NoSeWa":3, "ELO":4},
# "LotConfig":{"Inside":1,"Corner":2, "CulDSac":3, "FR2":4, "FR3":5},
# "LandSlope":{"Gtl":1,"Mod":2, "Sev":3},
# "Neighborhood":{"Blmngtn":1, "Blueste":2, "BrDale":3, "BrkSide":4, "ClearCr":5, "CollgCr":6, "Crawfor":7, "Edwards":8, "Gilbert":9, "IDOTRR":10, "MeadowV":11, "Mitchel":12, "Names":13, "NoRidge":14, "NPkVill":15, "NridgHt":16, "NWAmes":17, "OldTown":18, "SWISU":19, "Sawyer":20, "SawyerW":21, "Somerst":22, "StoneBr":23, "Timber":24, "Veenker":25},
# "Condition1":{"Artery":1, "Feedr":2, "Norm":3, "RRNn":4, "RRAn":5, "PosN":6, "PosA":7, "RRNe":8, "RRAe":9},
# "Condition2":{"Artery":1, "Feedr":2, "Norm":3, "RRNn":4, "RRAn":5, "PosN":6, "PosA":7, "RRNe":8, "RRAe":9},
# "BldgType":{"1Fam":1, "2FmCon":2, "Duplx":3, "TwnhsE":4, "TwnhsI":5},
# "HouseStyle":{"1Story":1, "1.5Fin":2, "1.5Unf":3, "2Story":4, "2.5Fin":5, "2.5Unf":6, "SFoyer":7, "SLvl":8},
# "RoofStyle":{"Flat":1, "Gable":2, "Gambrel":3, "Hip":4, "Mansard":5, "Shed":6},
# "RoofMatl":{"ClyTile":1, "CompShg":2, "Membran":3, "Metal":4, "Roll":5, "Tar&Grv":6, "WdShake":7, "WdShngl":8},
# "Exterior1st":{ "AsbShng":1, "AsphShn":2, "BrkComm":3, "BrkFace":4, "CBlock":5, "CemntBd":6, "HdBoard":7, "ImStucc":8, "MetalSd":9, "Other ":10, "Plywood":11, "PreCast":12, "Stone":13, "Stucco":14, "VinylSd":15, "Wd Sdng":16, "WdShing":17},
# "Exterior2nd":{ "AsbShng":1, "AsphShn":2, "BrkComm":3, "BrkFace":4, "CBlock":5, "CemntBd":6, "HdBoard":7, "ImStucc":8, "MetalSd":9, "Other ":10, "Plywood":11, "PreCast":12, "Stone":13, "Stucco":14, "VinylSd":15, "Wd Sdng":16, "WdShing":17},
# "MasVnrType":{"BrkCmn":1, "BrkFace":2, "CBlock":3, "None":4, "Stone":5},
# "ExterQual":{"Ex":1, "Gd":2, "TA":3, "Fa":4, "Po":5},
# "ExterCond":{"Ex":1, "Gd":2, "TA":3, "Fa":4, "Po":5},
# "Foundation":{"BrkTil":1, "CBlock":2, "PConc":3, "Slab":4, "Stone":5, "Wood":6},
# "BsmtQual":{"Ex":1, "Gd":2, "TA":3, "Fa":4, "Po":5, "No":6},
# "BsmtCond":{"Ex":1, "Gd":2, "TA":3, "Fa":4, "Po":5, "No":6},
# "BsmtExposure":{"Gd":5, "Av":4, "Mn":3, "No":2, "NoBasement":1},
# "BsmtFinType1":{"NoBasement":1, "Unf":2, "LwQ":3, "Rec":4, "BLQ":5, "ALQ":6, "GLQ":7},
# "BsmtFinType2":{"NoBasement":1, "Unf":2, "LwQ":3, "Rec":4, "BLQ":5, "ALQ":6, "GLQ":7},
# "SaleType":{ "WD ":1, "CWD":2, "VWD":3, "New":4, "COD":5, "Con":6, "ConLw":7, "ConLI":8, "ConLD":9, "Oth":10},
# "SaleCondition":{ "Normal":1, "Abnorml":2, "AdjLand":3, "Alloca":4, "Family":5, "Partial":6},
# "Heating":{"Floor":1, "GasA":2, "GasW":3, "Grav":4, "OthW":5, "Wall":6},
# "HeatingQC":{"Ex":5, "Gd":4, "TA":3, "Fa":2, "Po":1},
# "CentralAir":{"N":2, "Y":1},
# "Electrical":{"SBrkr":5, "FuseA":4, "FuseF":3, "FuseP":2, "Mix":1},
# "KitchenQual":{"Ex":5, "Gd":4, "TA":3, "Fa":2, "Po":1},
# "Functional":{ "Typ":1, "Min1":2, "Min2":3, "Mod":4, "Maj1":5, "Maj2":6, "Sev":7, "Sal":8},
# "GarageType":{ "2Types":1, "Attchd":2, "Basment":3, "BuiltIn":4, "CarPort":5, "Detchd":6, "No":7},
# "GarageFinish":{"Fin":4, "RFn":3, "Unf":2, "No":1},
# "GarageQual":{"Ex":5, "Gd":4, "TA":3, "Fa":2, "Po":1, "No":6},
# "GarageCond":{"Ex":5, "Gd":4, "TA":3, "Fa":2, "Po":1, "No":6},
# "PavedDrive":{"Y":5, "P":4, "N":3}
# })
X = pd.get_dummies(X)
X= X.fillna(X.mean())
X_train = X[:len(train)]
X_test=X[len(train):]
Y = X['SalePrice']
from sklearn.model_selection import train_test_split
#from sklearn.cross_validation import train_test_split
X_train_part, X_val, Y_train_part, Y_val = train_test_split(X, Y)
X_test = | pd.get_dummies(X_test) | pandas.get_dummies |
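# --- Editor's sketch (added; not in the original script) ---
# One plausible next step with the pieces prepared above: fit a simple Ridge baseline
# on the partial training split and report the validation RMSE.  alpha=10.0 is an
# assumed example value, not a tuned hyperparameter.
ridge_baseline = Ridge(alpha=10.0)
ridge_baseline.fit(X_train_part.drop('SalePrice', axis=1), Y_train_part)
val_pred = ridge_baseline.predict(X_val.drop('SalePrice', axis=1))
print('validation RMSE (log scale):', sqrt(mean_squared_error(Y_val, val_pred)))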
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
import csv
import pandas as pd
from tqdm import tqdm, trange
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
N_TOTAL_PAPERS = 24251
N_TOTAL_AUTHORS = 42614
N_TOTAL_NODES = N_TOTAL_PAPERS + N_TOTAL_AUTHORS
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def load_reference_edges(path="dataset/"):
print('Loading edge list...')
reference_links = np.load("../edge_and_weight_01.npy")
# reference_links = np.vstack([reference_links, np.fliplr(reference_links)])
# reference_links = pd.DataFrame(reference_links).drop_duplicates().values
reference_edge_weight = np.expand_dims(reference_links[:, -1], 1)
reference_edge_type = np.zeros((reference_links.shape[0], 1), dtype = int)
# pd.DataFrame(reference_links, columns=['src', 'dst', 'weight']).to_csv(path + "reference_edgelist.csv", index=False)
reference_links = reference_links[:, :-1]
return reference_links, reference_edge_weight, reference_edge_type
def count_citation(path="dataset/"):
print("Running citation counting...")
referenced = pd.read_csv(path + "paper_reference.csv").values[:, -1]
return pd.Series(referenced).value_counts()
def load_author_edges(path="dataset/"):
print('Loading edge list...')
coauthor_links = np.load(path + "coauthor.npy").astype(int)
coauthor_links = np.vstack([coauthor_links, np.fliplr(coauthor_links)])
coauthor_edges = pd.DataFrame(coauthor_links).value_counts()
coauthor_links = np.asarray(list(coauthor_edges.index))
# coauthor_edge_weight = np.ones((coauthor_links.shape[0], 1))
coauthor_edge_weight = 1 / (1 + np.exp(-0.5 * np.expand_dims(
np.asarray(list(coauthor_edges.values)), 1)))
coauthor_edge_type = 2 * np.ones((coauthor_links.shape[0], 1), dtype = int)
author_reference_links = np.load(path + "author_reference.npy")
author_reference_edges = pd.DataFrame(author_reference_links).value_counts()
author_reference_links = np.asarray(list(author_reference_edges.index))
author_reference_edge_weight = 1 / (1 + np.exp(-0.5 * np.expand_dims(
np.asarray(list(author_reference_edges.values)), 1)))
author_reference_edge_type = 3 * np.ones((author_reference_links.shape[0], 1), dtype = int)
edges_unordered = np.vstack([coauthor_links, author_reference_links])
edges_weight = np.vstack([coauthor_edge_weight, author_reference_edge_weight])
edges_type = np.vstack([coauthor_edge_type, author_reference_edge_type])
edges_info = pd.DataFrame(np.hstack([edges_unordered, edges_weight, edges_type]),
columns=['src', 'dst', 'weight', 'type']).drop_duplicates(subset=['src', 'dst']).values
return edges_info[:, :2], np.expand_dims(edges_info[:, 2], 1), np.expand_dims(edges_info[:, -1], 1)
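# --- Editor's note (added; not part of the original module) ---
# The 1 / (1 + np.exp(-0.5 * count)) factors above squash raw co-author and
# author-reference counts into (0.5, 1.0) edge weights, e.g. count 1 -> ~0.62,
# count 2 -> ~0.73, count 10 -> ~0.99.  A small illustrative helper:
def _count_to_edge_weight(count):
    """Sigmoid edge weighting used above (illustrative only, not called elsewhere)."""
    return 1.0 / (1.0 + np.exp(-0.5 * count))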
def load_edges(path="dataset/"):
print('Loading edge list...')
reference_links = pd.read_csv(path + "paper_reference.csv").values
reference_links = np.vstack([reference_links, np.fliplr(reference_links)])
reference_links = pd.DataFrame(reference_links).drop_duplicates().values
reference_edge_weight = np.ones((reference_links.shape[0], 1), dtype = float)
reference_edge_type = np.zeros((reference_links.shape[0], 1), dtype = int)
author_paper_links = pd.read_csv(path + "author_paper_all_with_year.csv").values[:, 0:-1]
author_paper_links[:, 0] += N_TOTAL_PAPERS
author_paper_links = np.vstack([author_paper_links, np.fliplr(author_paper_links)])
# author_paper_edges = np.hstack([author_paper_links, np.ones((author_paper_links.shape[0], 1))])
author_paper_edges = np.hstack([author_paper_links, np.load(path + "author_paper_edge_weight.npy")])
author_paper_edges = pd.DataFrame(author_paper_edges, columns=['i', 'j', 'w']).drop_duplicates(subset=['i', 'j']).values
author_paper_links = author_paper_edges[:, 0:-1]
author_paper_edge_weight = np.ones((author_paper_links.shape[0], 1))
# author_paper_edge_weight = np.expand_dims(author_paper_edges[:, -1], 1) / author_paper_edges[:, -1].mean()
author_paper_edge_type = np.ones((author_paper_links.shape[0], 1), dtype = int)
coauthor_links = np.load(path + "coauthor.npy").astype(int) + N_TOTAL_PAPERS
coauthor_links = np.vstack([coauthor_links, np.fliplr(coauthor_links)])
coauthor_edges = | pd.DataFrame(coauthor_links) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import collections
import pytest
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from pandas.compat import StringIO, u
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesToCSV(TestData):
def read_csv(self, path, **kwargs):
params = dict(squeeze=True, index_col=0,
header=None, parse_dates=True)
params.update(**kwargs)
header = params.get("header")
out = pd.read_csv(path, **params)
if header is None:
out.name = out.index.name = None
return out
def test_from_csv_deprecation(self):
# see gh-17812
with ensure_clean() as path:
self.ts.to_csv(path)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = self.read_csv(path)
depr_ts = Series.from_csv(path)
assert_series_equal(depr_ts, ts)
def test_from_csv(self):
with ensure_clean() as path:
self.ts.to_csv(path)
ts = self.read_csv(path)
assert_series_equal(self.ts, ts, check_names=False)
assert ts.name is None
assert ts.index.name is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
depr_ts = Series.from_csv(path)
assert_series_equal(depr_ts, ts)
# see gh-10483
self.ts.to_csv(path, header=True)
ts_h = self.read_csv(path, header=0)
assert ts_h.name == "ts"
self.series.to_csv(path)
series = self.read_csv(path)
assert_series_equal(self.series, series, check_names=False)
assert series.name is None
assert series.index.name is None
self.series.to_csv(path, header=True)
series_h = self.read_csv(path, header=0)
assert series_h.name == "series"
outfile = open(path, "w")
outfile.write("1998-01-01|1.0\n1999-01-01|2.0")
outfile.close()
series = self.read_csv(path, sep="|")
check_series = Series({datetime(1998, 1, 1): 1.0,
datetime(1999, 1, 1): 2.0})
assert_series_equal(check_series, series)
series = self.read_csv(path, sep="|", parse_dates=False)
check_series = Series({"1998-01-01": 1.0, "1999-01-01": 2.0})
assert_series_equal(check_series, series)
def test_to_csv(self):
import io
with ensure_clean() as path:
self.ts.to_csv(path)
with io.open(path, newline=None) as f:
lines = f.readlines()
assert (lines[1] != '\n')
self.ts.to_csv(path, index=False)
arr = np.loadtxt(path)
assert_almost_equal(arr, self.ts.values)
def test_to_csv_unicode_index(self):
buf = StringIO()
s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")])
s.to_csv(buf, encoding="UTF-8")
buf.seek(0)
s2 = self.read_csv(buf, index_col=0, encoding="UTF-8")
assert_series_equal(s, s2)
def test_to_csv_float_format(self):
with ensure_clean() as filename:
ser = Series([0.123456, 0.234567, 0.567567])
ser.to_csv(filename, float_format="%.2f")
rs = self.read_csv(filename)
xp = Series([0.12, 0.23, 0.57])
assert_series_equal(rs, xp)
def test_to_csv_list_entries(self):
s = | Series(['jack and jill', 'jesse and frank']) | pandas.Series |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas.compat import long
from pandas.core.arrays import PeriodArray, DatetimeArrayMixin as DatetimeArray
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
def one(request):
# zero-dim integer array behaves like an integer
return request.param
zeros = [box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array]
for dtype in [np.int64, np.uint64, np.float64]]
zeros.extend([np.array(0, dtype=dtype)
for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([0, 0.0, long(0)])
@pytest.fixture(params=zeros)
def zero(request):
# For testing division by (or of) zero for Index with length 5, this
# gives several scalar-zeros and length-5 vector-zeros
return request.param
# ------------------------------------------------------------------
# Vector Fixtures
@pytest.fixture(params=[pd.Float64Index(np.arange(5, dtype='float64')),
pd.Int64Index(np.arange(5, dtype='int64')),
pd.UInt64Index(np.arange(5, dtype='uint64')),
pd.RangeIndex(5)],
ids=lambda x: type(x).__name__)
def numeric_idx(request):
"""
Several types of numeric-dtypes Index objects
"""
return request.param
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return | pd.Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]') | pandas.Series |
from os import listdir
import numpy as np
import scipy.stats as st
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
def identify(in_dir, out_dir):
outlier_threshold = st.norm.ppf(0.99) # threshold for events - 99% significance
fs = listdir(in_dir) # directory containing files for each cell with the predictions of the curve fitting procedure
events = pd.DataFrame(columns=['GeomID', 'date', 'event']) # dataframe to contain events
rmses = []
for fi in fs:
df = | pd.read_csv(in_dir + '/' + fi) | pandas.read_csv |
from datetime import (
datetime,
time,
)
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestBetweenTime:
@td.skip_if_has_locale
def test_between_time_formats(self, frame_or_series):
# GH#11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is Series:
ts = ts[0]
strings = [
("2:00", "2:30"),
("0200", "0230"),
("2:00am", "2:30am"),
("0200am", "0230am"),
("2:00:00", "2:30:00"),
("020000", "023000"),
("2:00:00am", "2:30:00am"),
("020000am", "023000am"),
]
expected_length = 28
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_between_time(self, tzstr, frame_or_series):
tz = | timezones.maybe_get_tz(tzstr) | pandas._libs.tslibs.timezones.maybe_get_tz |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Calculate the Severe to critical infections column of Table 1
Needs the filter_SRAG.py csv output to run
The Comorbidities are written like the original database keywords:
'NENHUM': No Comorbidities
'PNEUMOPATI': Lung Disease
'IMUNODEPRE': Immunosuppression
'OBESIDADE': Obesity
'SIND_DOWN': Down Syndrome
'RENAL': Kidney Disease
'NEUROLOGIC': Neurological chronic disease
'DIABETES': Diabetes
'PUERPERA': Post-partum
'HEMATOLOGI': Hematologic chronic disease
'ASMA': Asthma
'HEPATICA': Liver disease
'CARDIOPATI': Heart disease
'OUT_MORBI': other comorbidity
"""
import numpy as np
from scipy.optimize import minimize, root
import datetime
import pandas as pd
ref = pd.to_datetime(datetime.date(2019, 12, 31))
data_init = pd.read_csv('../Data/SRAG_filtered_morb.csv')
data_init['MORTE'] = (data_init.EVOLUCAO == 2)
states = np.r_[np.array([ 'BR' ]), data_init.SG_UF_INTE.unique()]
for col in data_init.columns:
if (col[:2] == 'DT') or (col[:4] == 'DOSE'):
data_init.loc[:,col] = pd.to_datetime(data_init[col], format='%Y/%m/%d', errors='coerce')
ages = [0, 18, 30, 40, 50, 65, 75, 85, np.inf]
nsep = len(ages) - 1
data_init['AGE_GRP'] = ''
for i in range(nsep):
if i == nsep-1:
data_init.loc[(data_init.NU_IDADE_N>=ages[i]),'AGE_GRP'] = 'AG85+'
else:
data_init.loc[(data_init.NU_IDADE_N>=ages[i])&(data_init.NU_IDADE_N<ages[i+1]), 'AGE_GRP'] = 'AG{}t{}'.format(ages[i],ages[i+1])
ibpv = [data_init.ibp.quantile(x) for x in [0.0,0.2,0.4,0.6,0.8,1.0]]
names = [ 'BDI_' + i for i in ['0', '1', '2', '3', '4']]
data_init['BDI_GRP'] = ''
for i in range(5):
if i == 4:
data_init.loc[(data_init.ibp>=ibpv[i]),'BDI_GRP'] = names[i]
else:
data_init.loc[(data_init.ibp>=ibpv[i])&(data_init.ibp<ibpv[i+1]), 'BDI_GRP'] = names[i]
# trad_raca = {1:'Branca', 2:'Preta', 3:'Amarela', 4:'Parda', 5:'Indigena'}
trad_raca = {1:'White', 2:'Black', 3:'Yellow', 4:'Mixed', 5:'Indigenous'}
data_init['RACA'] = data_init['CS_RACA'].map(trad_raca)
ages = {loc:(data_init.AGE_GRP==loc).sum() for loc in data_init.AGE_GRP.unique()}
print(ages)
sexs = {loc:(data_init.CS_SEXO==loc).sum() for loc in data_init.CS_SEXO.unique()}
print(sexs)
raca = {loc:(data_init.RACA==loc).sum() for loc in data_init.RACA.unique()}
raca[np.nan] = | pd.isna(data_init.RACA) | pandas.isna |
#!/usr/bin/env python
# coding: utf-8
# # Still to fix:
#
# pull documents whose numbers have leading zeros
# Identify whether the document is a CPF or CNPJ and apply the corresponding formatting.
#
# report how many approved, cancelled, disaccredited, etc. clients remain in the final base
#
#
# ### Add ids to upload cases to Salesforce
#
# Build a database with the Salesforce records and ids, and refresh that database every day at 08:00 \
# This script then looks them up in the csv above and adds a column with the Ids corresponding to the SCs.
#
# API https://pypi.org/project/simple-salesforce/
#
#
# #### Check the SendGrid variables
#
# Provide the template ID so it pulls the SendGrid variables and renames the columns according to those variables
#
#
# # Cosmetics
#
# Remover todos os espaços e caracteres não imprimíveis ( https://support.microsoft.com/pt-br/office/as-dez-principais-maneiras-de-limpar-os-dados-2844b620-677c-47a7-ac3e-c2e157d1db19) \
# https://support.microsoft.com/pt-br/office/fun%C3%A7%C3%A3o-tirar-26f3d7c5-475f-4a9c-90e5-4b8ba987ba41 \
# https://support.microsoft.com/pt-br/office/fun%C3%A7%C3%A3o-arrumar-410388fa-c5df-49c6-b16c-9e5630b479f9
#
#
#
#
# # Loading libraries
# Load pandas, pyodbc, etc.
#!pip install Salesforce
import pandas as pd
import pyodbc
#import timeit
#import time
import math
from tqdm import tqdm
from simple_salesforce import Salesforce
from simple_salesforce import format_soql
### DW login settings
email = '<EMAIL>' # your DW login email
senha = '<PASSWORD>' # your DW login password
### Salesforce login settings
sf = Salesforce(username='<EMAIL>', password='<PASSWORD>', security_token='<PASSWORD>')
# In[2]:
# Defining functions
# Function template
# def nome_funcao(variavel: type, variavel2: type) -> type retorno:
# '''Describe the function
# Parameters:
# variavel : type
# describe this variable
# variavel2 : type
# describe this variable
# returns:
# what the function returns
# '''
# Write the function
# ### Add this to indicate that the function has finished
def import_db(file_path: str, separator: str) -> 'pandas.core.frame.DataFrame':
    '''Identify whether the file is Excel or csv and import it the right way
    Parameters:
file_path : str
        file to be analysed
separator : str
        csv separator
    Returns:
        dataframe read from the file
'''
if '.xlsx' in file_path[-5:]:
print('Identificado arquivo Excel, executando rotina de Excel.')
return pd.read_excel(file_path, dtype=str)
elif '.csv' in file_path[-4:]:
print('Identificado arquivo csv, executando rotina de csv.')
return | pd.read_csv(file_path, sep=separator, dtype=str, skipinitialspace=True, skip_blank_lines=True) | pandas.read_csv |
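# --- Editor's illustrative usage (added; the file name and separator are assumptions) ---
# clientes = import_db('clientes_aprovados.csv', ';')    # csv branch
# base = import_db('base_clientes.xlsx', ';')            # Excel branch ignores the separator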
from pandas import DataFrame, read_excel, ExcelFile, read_csv, concat, Series, \
notnull
from pathlib import Path
from re import match
from typing import Optional, List, Union, Callable
from survey import Survey
from survey.attributes import PositiveMeasureAttribute
from survey.mixins.data_types.categorical_mixin import CategoricalMixin
from survey.attributes import RespondentAttribute, SingleCategoryAttribute
from survey.attributes import CountAttribute
from survey.questions import Question
from survey.questions import SingleChoiceQuestion, FreeTextQuestion, \
LikertQuestion, MultiChoiceQuestion
from survey.questions import CountQuestion
from survey.questions import PositiveMeasureQuestion
from survey.questions import RankedChoiceQuestion
from survey.respondents import Respondent
from survey.surveys.metadata.attribute_metadata import AttributeMetadata
from survey.surveys.metadata.question_metadata import QuestionMetadata
from survey.surveys.survey_creators.choices import get_choices, \
get_likert_choices, get_multi_choices
class SurveyCreator(object):
def __init__(self,
survey_name: str,
survey_data_fn: Union[str, Path],
metadata_fn: Union[str, Path],
survey_id_col: Optional[str] = None,
survey_id: Optional = None,
pre_clean: Optional[Callable[[DataFrame], DataFrame]] = None):
"""
Create a new SurveyCreator.
:param survey_name: Name for the survey.
:param survey_data_fn: Path to the survey raw data file.
:param metadata_fn: Path to the survey metadata file.
:param survey_id_col: Optional name of the column that identifies the
survey in the metadata file.
:param survey_id: Optional value that identifies the survey in the
metadata file.
:param pre_clean: Optional method to run on the raw data file on read.
Used if there are some values in this specific raw
data file that need changing in some way.
"""
        # set at construction time
self.survey_name: str = survey_name
self.survey_data_fn: Path = (
survey_data_fn if isinstance(survey_data_fn, Path)
else Path(survey_data_fn)
)
self.metadata_fn: Path = (
metadata_fn if isinstance(metadata_fn, Path)
else Path(metadata_fn)
)
self.survey_id_col: Optional[str] = survey_id_col
self.survey_id: Optional = survey_id
self.survey: Optional[Survey] = None
self.pre_clean = pre_clean
        # populated later, during run()
self.survey_data: Optional[DataFrame] = None
self.questions_metadata: Optional[DataFrame] = None
self.attributes_metadata: Optional[DataFrame] = None
self.orders_metadata: Optional[DataFrame] = None
self.question_metadatas: Optional[List[QuestionMetadata]] = None
self.attribute_metadatas: Optional[List[AttributeMetadata]] = None
self.questions: Optional[List[Question]] = None
self.respondent_attributes: Optional[List[RespondentAttribute]] = None
self.respondents: Optional[List[Respondent]] = None
# focus vision
self.loop_mappings: Optional[DataFrame] = None
self.loop_expressions: Optional[DataFrame] = None
self.questions_metadata_original: Optional[DataFrame] = None
def run(self) -> Survey:
"""
Run all the steps to create the Survey object.
"""
self.read_survey_data()
self.read_metadata()
self.validate_metadata()
self.convert_metadata_to_objects()
self.clean_survey_data()
self.format_survey_data()
self.create_survey_components()
self.create_survey()
return self.survey
def read_survey_data(self):
"""
Read the raw survey data file and do any custom pre-cleaning.
"""
data = read_csv(self.survey_data_fn)
if self.pre_clean is not None:
data = self.pre_clean(data)
self.survey_data = data
def _filter_to_survey(self, metadata: DataFrame) -> DataFrame:
"""
Filter the given metadata to only contain metadata for the current
survey.
"""
if self.survey_id_col in metadata.columns:
metadata = metadata.loc[
(metadata[self.survey_id_col] == self.survey_id) |
(metadata[self.survey_id_col].isnull())
]
return metadata
def read_metadata(self):
"""
Read the question, attribute and order metadata from the Excel
metadata file.
"""
metadata = | ExcelFile(self.metadata_fn) | pandas.ExcelFile |
from functools import partial
import os
import shutil
from attrdict import AttrDict
import neptune
from neptunecontrib.api.utils import get_filepaths
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.metrics import roc_auc_score
from steppy.base import Step, IdentityOperation
from steppy.adapter import Adapter, E
from common_blocks import augmentation as aug
from common_blocks import models
from common_blocks import loaders
from common_blocks import utils
from common_blocks import postprocessing
utils.check_env_vars()
CONFIG = utils.read_config(config_path=os.getenv('CONFIG_PATH'))
LOGGER = utils.init_logger()
neptune.init(project_qualified_name=CONFIG.project)
# ______ ______ .__ __. _______ __ _______ _______.
# / | / __ \ | \ | | | ____|| | / _____| / |
# | ,----'| | | | | \| | | |__ | | | | __ | (----`
# | | | | | | | . ` | | __| | | | | |_ | \ \
# | `----.| `--' | | |\ | | | | | | |__| | .----) |
# \______| \______/ |__| \__| |__| |__| \______| |_______/
#
EXPERIMENT_NAME = 'empty_vs_non_empty'
EXPERIMENT_DIR = 'data/experiments/{}'.format(EXPERIMENT_NAME)
CLONE_EXPERIMENT_DIR_FROM = '' # When running eval in the cloud specify this as for example /input/SAL-14/output/experiment
OVERWRITE_EXPERIMENT_DIR = False
DEV_MODE = False
SECOND_LEVEL = False
USE_DEPTH = False
USE_AUXILIARY_DATA = False
TAGS = ['first-level', 'training', 'empty_vs_non_empty']
if OVERWRITE_EXPERIMENT_DIR and os.path.isdir(EXPERIMENT_DIR):
shutil.rmtree(EXPERIMENT_DIR)
if CLONE_EXPERIMENT_DIR_FROM != '':
if os.path.exists(EXPERIMENT_DIR):
shutil.rmtree(EXPERIMENT_DIR)
shutil.copytree(CLONE_EXPERIMENT_DIR_FROM, EXPERIMENT_DIR)
PARAMS = CONFIG.parameters
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
SEED = 1234
ID_COLUMN = 'id'
DEPTH_COLUMN = 'z'
X_COLUMN = 'file_path_image'
Y_COLUMN = 'is_not_empty'
if USE_DEPTH:
x_columns = [X_COLUMN, DEPTH_COLUMN]
else:
x_columns = [X_COLUMN]
CONFIG = AttrDict({
'execution': {'experiment_dir': EXPERIMENT_DIR,
'num_workers': PARAMS.num_workers,
},
'general': {'img_H-W': (PARAMS.image_h, PARAMS.image_w),
'loader_mode': PARAMS.loader_mode,
'num_classes': 2,
'original_size': (101, 101),
},
'xy_splitter': {
'network': {'x_columns': x_columns,
'y_columns': [Y_COLUMN],
},
},
'reader': {
'network': {'x_columns': x_columns,
'y_columns': [Y_COLUMN],
},
},
'loaders': {'stacking': {'dataset_params': {'h': PARAMS.image_h,
'w': PARAMS.image_w,
'image_source': PARAMS.image_source,
'target_format': PARAMS.target_format,
'use_depth': USE_DEPTH,
'MEAN': MEAN,
'STD': STD
},
'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
'shuffle': True,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
'inference': {'batch_size': PARAMS.batch_size_inference,
'shuffle': False,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
},
'augmentation_params': {},
},
'resize_and_pad': {'dataset_params': {'h': PARAMS.image_h,
'w': PARAMS.image_w,
'image_source': PARAMS.image_source,
'use_depth': USE_DEPTH,
'MEAN': MEAN,
'STD': STD
},
'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
'shuffle': True,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
'inference': {'batch_size': PARAMS.batch_size_inference,
'shuffle': False,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
},
'augmentation_params': {'image_augment_train': aug.intensity_seq,
'image_augment_with_target_train': aug.resize_pad_seq(
resize_target_size=PARAMS.resize_target_size,
pad_method=PARAMS.pad_method,
pad_size=PARAMS.pad_size),
'image_augment_inference': aug.pad_to_fit_net(64,
PARAMS.pad_method),
'image_augment_with_target_inference': aug.pad_to_fit_net(64,
PARAMS.pad_method)
},
},
'pad_tta': {'dataset_params': {'h': PARAMS.image_h,
'w': PARAMS.image_w,
'image_source': PARAMS.image_source,
'use_depth': USE_DEPTH,
'MEAN': MEAN,
'STD': STD
},
'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
'shuffle': True,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
'inference': {'batch_size': PARAMS.batch_size_inference,
'shuffle': False,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
},
'augmentation_params': {
'image_augment_inference': aug.pad_to_fit_net(64, PARAMS.pad_method),
'image_augment_with_target_inference': aug.pad_to_fit_net(64,
PARAMS.pad_method),
'tta_transform': aug.test_time_augmentation_transform
},
},
'resize': {'dataset_params': {'h': PARAMS.image_h,
'w': PARAMS.image_w,
'image_source': PARAMS.image_source,
'use_depth': USE_DEPTH,
'MEAN': MEAN,
'STD': STD
},
'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
'shuffle': True,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
'inference': {'batch_size': PARAMS.batch_size_inference,
'shuffle': False,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
},
'augmentation_params': {'image_augment_train': aug.intensity_seq,
'image_augment_with_target_train': aug.resize_seq(
resize_target_size=PARAMS.resize_target_size),
'image_augment_inference': aug.resize_to_fit_net(
resize_target_size=PARAMS.resize_target_size),
'image_augment_with_target_inference': aug.resize_to_fit_net(
resize_target_size=PARAMS.resize_target_size)
},
},
'resize_tta': {'dataset_params': {'h': PARAMS.image_h,
'w': PARAMS.image_w,
'image_source': PARAMS.image_source,
'use_depth': USE_DEPTH,
'MEAN': MEAN,
'STD': STD
},
'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
'shuffle': True,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
'inference': {'batch_size': PARAMS.batch_size_inference,
'shuffle': False,
'num_workers': PARAMS.num_workers,
'pin_memory': PARAMS.pin_memory
},
},
'augmentation_params': {
'image_augment_inference': aug.resize_to_fit_net(
resize_target_size=PARAMS.resize_target_size),
'image_augment_with_target_inference': aug.resize_to_fit_net(
resize_target_size=PARAMS.resize_target_size),
'tta_transform': aug.test_time_augmentation_transform
},
},
},
'model': {
'network': {
'architecture_config': {'model_params': {'n_filters': PARAMS.n_filters,
'conv_kernel': PARAMS.conv_kernel,
'pool_kernel': PARAMS.pool_kernel,
'pool_stride': PARAMS.pool_stride,
'repeat_blocks': PARAMS.repeat_blocks,
'batch_norm': PARAMS.use_batch_norm,
'dropout': PARAMS.dropout_conv,
'in_channels': PARAMS.image_channels,
'out_channels': PARAMS.network_output_channels,
'nr_outputs': PARAMS.nr_network_outputs,
'architecture': PARAMS.architecture,
'activation': PARAMS.network_activation,
},
'optimizer_params': {'lr': PARAMS.lr,
},
'regularizer_params': {'regularize': True,
'weight_decay_conv2d': PARAMS.l2_reg_conv,
},
'weights_init': {'function': 'xavier',
},
},
'training_config': {'epochs': PARAMS.epochs_nr,
'shuffle': True,
'batch_size': PARAMS.batch_size_train,
'fine_tuning': PARAMS.fine_tuning,
},
'callbacks_config': {'model_checkpoint': {
'filepath': os.path.join(EXPERIMENT_DIR, 'checkpoints', 'network', 'best.torch'),
'epoch_every': 1,
'metric_name': PARAMS.validation_metric_name,
'minimize': PARAMS.minimize_validation_metric},
'exponential_lr_scheduler': {'gamma': PARAMS.gamma,
'epoch_every': 1},
'reduce_lr_on_plateau_scheduler': {'metric_name': PARAMS.validation_metric_name,
'minimize': PARAMS.minimize_validation_metric,
'reduce_factor': PARAMS.reduce_factor,
'reduce_patience': PARAMS.reduce_patience,
'min_lr': PARAMS.min_lr},
'training_monitor': {'batch_every': 0,
'epoch_every': 1},
'experiment_timing': {'batch_every': 0,
'epoch_every': 1},
'validation_monitor': {'epoch_every': 1,
'data_dir': PARAMS.train_images_dir,
'loader_mode': PARAMS.loader_mode,
'use_depth': USE_DEPTH},
'neptune_monitor': {'model_name': 'network',
'image_nr': 16,
'image_resize': 1.0,
'image_every': None,
'use_depth': USE_DEPTH},
'early_stopping': {'patience': PARAMS.patience,
'metric_name': PARAMS.validation_metric_name,
'minimize': PARAMS.minimize_validation_metric},
}
},
},
'tta_generator': {'flip_ud': False,
'flip_lr': True,
'rotation': False,
'color_shift_runs': 0},
'tta_aggregator': {'tta_inverse_transform': aug.test_time_augmentation_inverse_transform,
'method': PARAMS.tta_aggregation_method,
'nthreads': PARAMS.num_threads
},
'thresholder': {'threshold_masks': PARAMS.threshold_masks,
},
})
# .______ __ .______ _______ __ __ .__ __. _______ _______.
# | _ \ | | | _ \ | ____|| | | | | \ | | | ____| / |
# | |_) | | | | |_) | | |__ | | | | | \| | | |__ | (----`
# | ___/ | | | ___/ | __| | | | | | . ` | | __| \ \
# | | | | | | | |____ | `----.| | | |\ | | |____.----) |
# | _| |__| | _| |_______||_______||__| |__| \__| |_______|_______/
#
def emptiness_preprocessing_train(config, model_name='network', suffix=''):
reader_train = Step(name='xy_train{}'.format(suffix),
transformer=loaders.XYSplit(train_mode=True, **config.xy_splitter[model_name]),
input_data=['input'],
adapter=Adapter({'meta': E('input', 'meta')}),
experiment_directory=config.execution.experiment_dir)
reader_inference = Step(name='xy_inference{}'.format(suffix),
transformer=loaders.XYSplit(train_mode=True, **config.xy_splitter[model_name]),
input_data=['callback_input'],
adapter=Adapter({'meta': E('callback_input', 'meta_valid')}),
experiment_directory=config.execution.experiment_dir)
loader = Step(name='loader{}'.format(suffix),
transformer=loaders.EmptinessLoader(train_mode=True, **config.loaders.resize),
input_steps=[reader_train, reader_inference],
adapter=Adapter({'X': E(reader_train.name, 'X'),
'y': E(reader_train.name, 'y'),
'X_valid': E(reader_inference.name, 'X'),
'y_valid': E(reader_inference.name, 'y'),
}),
experiment_directory=config.execution.experiment_dir)
return loader
def emptiness_preprocessing_inference(config, model_name='network', suffix=''):
reader_inference = Step(name='xy_inference{}'.format(suffix),
transformer=loaders.XYSplit(train_mode=False, **config.xy_splitter[model_name]),
input_data=['input'],
adapter=Adapter({'meta': E('input', 'meta')}),
experiment_directory=config.execution.experiment_dir)
loader = Step(name='loader{}'.format(suffix),
transformer=loaders.EmptinessLoader(train_mode=False, **config.loaders.resize),
input_steps=[reader_inference],
adapter=Adapter({'X': E(reader_inference.name, 'X'),
'y': E(reader_inference.name, 'y'),
}),
experiment_directory=config.execution.experiment_dir,
cache_output=True)
return loader
def network(config, suffix='', train_mode=True):
if train_mode:
preprocessing = emptiness_preprocessing_train(config, model_name='network', suffix=suffix)
else:
preprocessing = emptiness_preprocessing_inference(config, suffix=suffix)
network = utils.FineTuneStep(name='network{}'.format(suffix),
transformer=models.SegmentationModel(**config.model['network']),
input_data=['callback_input'],
input_steps=[preprocessing],
adapter=Adapter({'datagen': E(preprocessing.name, 'datagen'),
'validation_datagen': E(preprocessing.name, 'validation_datagen'),
'meta_valid': E('callback_input', 'meta_valid'),
}),
is_trainable=True,
fine_tuning=config.model.network.training_config.fine_tuning,
experiment_directory=config.execution.experiment_dir)
mask_resize = Step(name='mask_resize{}'.format(suffix),
transformer=utils.make_apply_transformer(partial(postprocessing.resize_emptiness_predictions,
target_size=config.general.original_size),
output_name='resized_images',
apply_on=['images']),
input_steps=[network],
adapter=Adapter({'images': E(network.name, 'mask_prediction'),
}),
experiment_directory=config.execution.experiment_dir)
return mask_resize
# __________ ___ _______ ______ __ __ .___________. __ ______ .__ __.
# | ____\ \ / / | ____| / || | | | | || | / __ \ | \ | |
# | |__ \ V / | |__ | ,----'| | | | `---| |----`| | | | | | | \| |
# | __| > < | __| | | | | | | | | | | | | | | | . ` |
# | |____ / . \ | |____ | `----.| `--' | | | | | | `--' | | |\ |
# |_______/__/ \__\ |_______| \______| \______/ |__| |__| \______/ |__| \__|
#
def train_evaluate_cv():
meta = pd.read_csv(PARAMS.metadata_filepath)
if DEV_MODE:
meta = meta.sample(PARAMS.dev_mode_size, random_state=SEED)
meta_train = meta[meta['is_train'] == 1]
with neptune.create_experiment(name=EXPERIMENT_NAME,
params=PARAMS,
tags=TAGS + ['train', 'evaluate', 'on_cv_folds'],
upload_source_files=get_filepaths(),
properties={'experiment_dir': EXPERIMENT_DIR}):
cv = utils.KFoldBySortedValue(n_splits=PARAMS.n_cv_splits, shuffle=PARAMS.shuffle, random_state=SEED)
fold_auc = []
for fold_id, (train_idx, valid_idx) in enumerate(cv.split(meta_train[DEPTH_COLUMN].values.reshape(-1))):
train_data_split, valid_data_split = meta_train.iloc[train_idx], meta_train.iloc[valid_idx]
if USE_AUXILIARY_DATA:
auxiliary = | pd.read_csv(PARAMS.auxiliary_metadata_filepath) | pandas.read_csv |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcasted to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
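    # Wrap the NumPy data in a builtin array.array (via memoryview) so the
    # Series is constructed from non-NumPy host memory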
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
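    # Float columns use NaN as the null placeholder; other dtypes use None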
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
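    # Offset by 1.0 so no operand is zero (the fixture starts at 0),
    # which keeps mod, floordiv and pow well defined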
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
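    # Same 1.0 offset as test_binops_df to avoid zero operands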
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic():
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
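    # Attach a named integer index so preserve_index has an observable
    # effect on the resulting Arrow table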
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash can return different values across runs, which sometimes
    # makes enc_with_name_arr and enc_arr identical, and there is no better
    # way to force a constant hash. Use an integer name so the hash value
    # stays constant.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
    def query_GPU_memory():
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
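    # Each column's Series should share the masked frame's index buffer
    # (same device pointer), and masking should not leak GPU memory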
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
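    # Compare against NumPy's digitize computed on the host data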
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast the pandas Series explicitly because a list of None values
    # would otherwise produce an `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only added ignore_index to sort_index in 1.0, so compute the
    # expectation without it and reset the index manually below when needed
    expected = pdf.sort_index(
        axis=axis,
        level=level,
        ascending=ascending,
        inplace=inplace,
        na_position=na_position,
    )
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
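    # Taking zero rows must not change the column dtypes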
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
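    # With nan_as_null=True both NaN and None become nulls; with False
    # only None does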
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
    # Work around a bug in Arrow 0.14.1 where NaNs aren't handled correctly
    # by casting both sides to int64 before comparing
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
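    # decimals may be a cudf.Series of per-column values; convert it to
    # pandas so the expected result uses the same per-column decimals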
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True, so replace it
    # with `False` before comparing
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
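    # The frame's total __sizeof__ should decompose into the index size
    # plus the sum of the individual column sizes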
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
                reason="pandas's failure here seems like a bug (in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
                # pandas < 1.1 can raise "Lengths must match." here
                if not PANDAS_GE_110:
                    pytest.xfail(
                        "https://github.com/pandas-dev/pandas/issues/34256"
                    )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
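    # For rows = 100 this works out to 1600 + 1600 + 24 + 24 = 3248 bytes.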
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values on
    # alignment, so we need to typecast to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
| pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64") | pandas.Series |
"""Momentum View"""
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from gamestonk_terminal.config_terminal import theme
from gamestonk_terminal.common.technical_analysis import momentum_model
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import export_data, plot_autoscale, reindex_dates
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def display_cci(
ohlc: pd.DataFrame,
length: int = 14,
scalar: float = 0.0015,
s_ticker: str = "",
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display CCI Indicator
Parameters
----------
ohlc : pd.DataFrame
Dataframe of OHLC
length : int
Length of window
scalar : float
        Scaling constant used in the CCI calculation
s_ticker : str
Stock ticker
export : str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
External axes (2 axes are expected in the list), by default None
"""
df_ta = momentum_model.cci(
ohlc["High"], ohlc["Low"], ohlc["Adj Close"], length, scalar
)
plot_data = | pd.merge(ohlc, df_ta, how="outer", left_index=True, right_index=True) | pandas.merge |
# <NAME>
# for BroadInstitute
# in 2019
from __future__ import print_function
import json
import os
import string
import subprocess
import pandas as pd
import numpy as np
import itertools
import random
import xmltodict
from biomart import BiomartServer
import io
chromsize = {
"chr1": 248956422,
"chr10":133797422,
"chr11":135086622,
"chr12":133275309,
"chr13":114364328,
"chr14":107043718,
"chr15":101991189,
"chr16":90338345,
"chr17":83257441,
"chr18":80373285,
"chr19":58617616,
"chr2": 242193529,
"chr20":64444167,
"chr21":46709983,
"chr22":50818468,
"chr3": 198295559,
"chr4": 190214555,
"chr5": 181538259,
"chr6": 170805979,
"chr7": 159345973,
"chr8": 145138636,
"chr9": 138394717,
"chrX": 156040895,
"chrY": 57227415,
}
rename_mut = {'contig': 'chr', 'position': 'pos', 'Reference_Allele': 'ref', 'ref_allele': 'ref', 'alt_allele': 'alt',
'Chromosome': 'chr', 'End_postition': 'end', 'Start_position': 'pos', 'Tumor_Seq_Allele1': "alt"}
def fileToList(filename):
"""
loads an input file with a\\n b\\n.. into a list [a,b,..]
"""
with open(filename) as f:
return [val[:-1] for val in f.readlines()]
def listToFile(l, filename):
"""
loads a list with [a,b,..] into an input file a\\n b\\n..
"""
with open(filename, 'w') as f:
for item in l:
f.write("%s\n" % item)
def dictToFile(d, filename):
"""
turn a dict into a json file
"""
with open(filename, 'w') as json_file:
json.dump(d, json_file)
def fileToDict(filename):
"""
loads a json file into a python dict
"""
with open(filename) as f:
data = json.load(f)
return data
def batchMove(l, pattern=['*.', '.*'], folder='', add=''):
"""
moves a set of files l into a folder:
Args:
l: file list
pattern: if files are a set of patterns to match
folder: folder to move file into
add: some additional mv parameters
"""
for val in l:
cmd = 'mv '
if add:
cmd += add + ' '
if '*.' in pattern:
cmd += '*'
cmd += val
if '.*' in pattern:
cmd += '*'
cmd += " " + folder
res = os.system(cmd)
if res != 0:
raise Exception("Leave command pressed or command failed")
def batchRename(dt, folder='', sudo=False, doAll=False, add='', dryrun=False):
"""
Given a dict renames corresponding files in a folder
Args:
        dt (dict): dict(currentName: newName) renaming dictionary
folder (str): folder to look into
add: some additional mv parameters
"""
cmd = 'ls -R ' + folder if doAll else 'ls ' + folder
files = os.popen(cmd).read().split('\n')
if doAll:
prep=''
f = []
for val in files:
if len(val)==0:
prep=''
continue
if val[0]=='.' and len(val)>3:
prep=val[:-1]
continue
if "." in val:
f.append(prep+"/"+val)
files = f
for k, val in dt.items():
for f in files:
if k in f:
cmd = 'sudo mv ' if sudo else 'mv '
if add:
cmd += add + ' '
if not doAll:
cmd += folder
cmd += f
cmd += ' '
if not doAll:
cmd += folder
cmd += f.replace(k, val)
if dryrun:
print(cmd)
else:
res = os.system(cmd)
if res != 0:
raise Exception("Leave command pressed or command failed")
def grouped(iterable, n):
"""
    iterate over the elements of a list n at a time
s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ...
"""
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
def overlap(interval1, interval2):
"""computed overlap
Given [0, 4] and [1, 10] returns [1, 4]
Given [0, 4] and [8, 10] returns False
"""
if interval2[0] <= interval1[0] <= interval2[1]:
start = interval1[0]
elif interval1[0] <= interval2[0] <= interval1[1]:
start = interval2[0]
else:
return False
if interval2[0] <= interval1[1] <= interval2[1]:
end = interval1[1]
elif interval1[0] <= interval2[1] <= interval1[1]:
end = interval2[1]
else:
return False
return (start, end)
def union(interval1, interval2):
"""
Given [0, 4] and [1, 10] returns [0, 10]
Given [0, 4] and [8, 10] returns False
"""
if interval1[0] <= interval2[0] <= interval1[1]:
start = interval1[0]
end = interval1[1] if interval2[1] <= interval1[1] else interval2[1]
elif interval1[0] <= interval2[1] <= interval1[1]:
start = interval2[0] if interval2[0] <= interval1[0] else interval1[0]
end = interval1[1]
else:
return False
return (start, end)
def nans(df): return df[df.isnull().any(axis=1)]
def createFoldersFor(filepath):
"""
    recursively create any missing folders needed to save a file at this filepath
"""
prevval = ''
for val in os.path.expanduser(filepath).split('/')[:-1]:
prevval += val + '/'
if not os.path.exists(prevval):
os.mkdir(prevval)
def randomString(stringLength=6, stype='all', withdigits=True):
"""
Generate a random string of letters and digits
Args:
stringLength (int, optional): the amount of char. Defaults to 6.
stype (str, optional): one of lowercase, uppercase, all. Defaults to 'all'.
withdigits (bool, optional): digits allowed in the string? Defaults to True.
Returns:
str: random string
"""
if stype == 'lowercase':
lettersAndDigits = string.ascii_lowercase
elif stype == 'uppercase':
lettersAndDigits = string.ascii_uppercase
else:
lettersAndDigits = string.ascii_letters
if withdigits:
lettersAndDigits += string.digits
return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))
def pdDo(df, op="mean", of="value1", over="value2"):
    """
    aggregate column `of` (mean/sum/max/min) over runs of equal values in column `over` (WIP)
    """
    funcs = {"mean": np.mean, "sum": np.sum, "max": np.max, "min": np.min}
    df = df.sort_values(by=over)
    index = []
    ret = []
    # accumulate the values of `of` for the current run of `over`
    data = [df.iloc[0][of]]
    prev = df.iloc[0][over]
    last_k = df.index[0]
    for k, val in df.iloc[1:].iterrows():
        if val[over] == prev:
            data.append(val[of])
        else:
            index.append(last_k)
            ret.append(funcs[op](data))
            data = [val[of]]
            prev = val[over]
        last_k = k
    # flush the final group
    index.append(last_k)
    ret.append(funcs[op](data))
    return index, ret
def parrun(cmds, cores, add=[]):
"""
runs a set of commands in parallel using the "&" command
Args:
cmds: the list of commands
cores: number of parallel execution
add: an additional list(len(cmds)) of command to run in parallel at the end of each parallel run
"""
count = 0
exe = ''
if len(add) != 0 and len(add) != len(cmds):
raise ValueError("we would want them to be the same size")
else:
addexe = ''
fullres = []
for i, cmd in enumerate(cmds):
count += 1
exe += cmd
if len(add) != 0:
addexe += add[i]
if count < cores and i < len(cmds) - 1:
exe += ' & '
if len(add) != 0:
addexe += ' & '
else:
count = 0
res = subprocess.run(exe, capture_output=True, shell=True)
if res.returncode != 0:
raise ValueError('issue with the command: ' + str(res.stderr))
exe = ''
if len(add) != 0:
res = subprocess.run(addexe, capture_output=True, shell=True)
if res.returncode != 0:
raise ValueError(
'issue with the command: ' + str(res.stderr))
addexe = ''
fullres.append(res.stdout.decode('utf-8'))
return fullres
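# Illustrative usage sketch (added, not part of the original module): running
# shell commands two at a time with parrun. The echo commands are harmless
# placeholders standing in for real jobs.
def _example_parrun():
    cmds = ["echo job1", "echo job2", "echo job3", "echo job4"]
    outputs = parrun(cmds, cores=2)
    print(outputs)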
def askif(quest):
"""
    asks a y/n question to the user and returns 1 (yes) or 0 (no) depending on the answer
"""
print(quest)
inp = input()
if inp in ['yes', 'y', 'Y', 'YES', 'oui', 'si']:
return 1
elif inp in ['n', 'no', 'nope', 'non', 'N']:
return 0
else:
return askif('you need to answer by yes or no')
def inttodate(i, lim=1965, unknown='U', sep='-', order="asc", startsatyear=0):
"""
transforms an int representing days into a date
Args:
i: the int
        lim: the cutoff year below which the value is considered invalid
        unknown: what to return when the date is unknown (i.e. the year falls below the cutoff)
sep: the sep between your date (e.g. /, -, ...)
order: if 'asc', do d,m,y else do y,m,d
startsatyear: when is the year to start counting for this int
Returns:
str: the date or unknown
"""
a = int(i // 365)
if a > lim:
a = str(a + startsatyear)
r = i % 365
m = str(int(r // 32)) if int(r // 32) > 0 else str(1)
r = r % 32
d = str(int(r)) if int(r) > 0 else str(1)
else:
return unknown
return d + sep + m + sep + a if order == "asc" else a + sep + m + sep + d
def datetoint(dt, split='-', unknown='U', order="des"):
"""
same as inttodate but in the opposite way;
starts at 0y,0m,0d
Args:
dt: the date string
split: the splitter in the string (e.g. /,-,...)
        unknown: placeholder used for unknown dates (e.g. 'U' or 0); these are converted to 0 instead of crashing
order: if 'asc', do d,m,y else do y,m,d
Returns:
int: the date
"""
arr = np.array(dt[0].split(split) if dt[0] !=
unknown else [0, 0, 0]).astype(int)
if len(dt) > 1:
for val in dt[1:]:
arr = np.vstack(
(arr, np.array(val.split(split) if val != unknown and val.count(split) == 2 else [0, 0, 0]).astype(int)))
arr = arr.T
res = arr[2] * 365 + arr[1] * 31 + \
arr[0] if order == "asc" else arr[0] * 365 + arr[1] * 31 + arr[2]
return [res] if type(res) is np.int64 else res
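# Illustrative sketch (added, not part of the original module): round-tripping a
# day count through inttodate and back through datetoint. The day count is an
# arbitrary placeholder; exact values depend on the 365/31-day approximation
# used by both helpers, so the round trip is only approximate.
def _example_date_roundtrip():
    day_count = 2015 * 365 + 3 * 31 + 12
    as_date = inttodate(day_count, order="asc")
    back = datetoint([as_date], order="asc")
    print(as_date, back)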
prevshowcount = 100
def showcount(i, size):
"""
pretty print of i/size%, to put in a for loop
"""
global prevshowcount
a = 1 + int(100 * (i / size))
if a != prevshowcount:
print(str(a) + '%', end='\r')
prevshowcount = a
def combin(n, k):
"""
    outputs the number of combinations of n objects taken k at a time
"""
if k > n // 2:
k = n - k
x = 1
y = 1
i = n - k + 1
while i <= n:
x = (x * i) // y
y += 1
i += 1
return x
def dups(lst):
"""
shows the duplicates in a list
"""
seen = set()
# adds all elements it doesn't know yet to seen and all other to seen_twice
seen_twice = set(x for x in lst if x in seen or seen.add(x))
# turn the set into a list (as requested)
return list(seen_twice)
def makeCombinations(size, proba):
"""
    produces the probability of X events happening at the same time.
    Pretty useful for co-binding analysis. Will compute it
    given the binomial probabilities of each event occurring and the number of trials
Args:
size: int number of trials
proba: list[float] probabilities of each event occuring
"""
sums = {i:0 for i in range(1,size)}
for i in range(size-1,0,-1):
print(i)
if sums[i]> 0:
continue
print(combin(size+3,i))
v=0
for j in itertools.combinations(proba, i):
v+=np.prod(j)
sums[i] = v
for i in range(size-1,0,-1):
for j in range(i+1,size):
icomb = combin(j,i)
sums[i] -= icomb*sums[j]
sums[0] = 1-sum(list(sums.values()))
return sums
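# Illustrative sketch (added, not part of the original module): combin gives
# binomial coefficients, and makeCombinations turns per-event probabilities into
# (per its docstring) the probabilities of several events co-occurring. The
# probabilities below are arbitrary placeholders.
def _example_combinations():
    assert combin(5, 2) == 10
    probs = [0.1, 0.2, 0.3]
    print(makeCombinations(len(probs), probs))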
def closest(lst, K):
"""
    returns the value in lst that is closest to K
"""
return lst[min(range(len(lst)), key = lambda i: abs(lst[i]-K))]
def compareDfs(df1, df2):
"""
compares df1 to df2
shows col difference, index difference, nans & 0s differences
"""
nmissmatchCols = set(df1.columns)-set(df2.columns)
omissmatchCols = set(df2.columns)-set(df1.columns)
nmissmatchInds = set(df1.index)-set(df2.index)
omissmatchInds = set(df2.index)-set(df1.index)
newNAs = df1.isna().sum().sum() - df2.isna().sum().sum()
new0s = (df1 == 0).sum().sum() - (df2 == 0).sum().sum()
print('FOUND missmatch Columns NOT IN df2: ' + str(nmissmatchCols))
print('FOUND missmatch Columns NOT IN df1: ' + str(omissmatchCols))
print('FOUND missmatch Index NOT IN df2: ' + str(nmissmatchInds))
print('FOUND missmatch Index NOT IN df1: ' + str(omissmatchInds))
print('FOUND new NAs in df1: ' + str(newNAs))
print('FOUND new 0s in df1: ' + str(new0s))
return nmissmatchCols, omissmatchCols, nmissmatchInds, omissmatchInds, newNAs, new0s
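# Illustrative sketch (added, not part of the original module): comparing two
# small toy dataframes with compareDfs; the frames below are placeholders.
def _example_compareDfs():
    df_new = pd.DataFrame({"a": [1, 0], "b": [np.nan, 2]}, index=["x", "y"])
    df_old = pd.DataFrame({"a": [1, 2], "c": [3, 4]}, index=["x", "z"])
    return compareDfs(df_new, df_old)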
def stringifydict(res):
"""[summary]
Args:
res ([type]): [description]
Returns:
[type]: [description]
"""
a = {}
for k, v in res.items():
if type(v) is dict:
            a[str(k)] = stringifydict(v)  # stringify the key of nested dicts as well
else:
a[str(k)] = v
return a
def readXMLs(folder=None, file=None, rename=None):
"""[summary]
Args:
folder ([type], optional): [description]. Defaults to None.
file ([type], optional): [description]. Defaults to None.
rename ([type], optional): [description]. Defaults to None.
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
if file is not None:
if type(file) is str:
print('reading 1 file')
files = [file]
else:
print('reading files')
files = file
elif folder is not None:
print("reading from folder")
files = [i for i in os.listdir(folder) if i.endswith(".xml")]
else:
raise ValueError('need folder or file')
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 22:09:56 2019
@author: Jesus
"""
import numpy as np
import pandas as pd
import requests
import json
from bs4 import BeautifulSoup
import urllib.request as urllib2
import asyncio
import aiohttp
async def call(url, session):
async with session.get(url) as response:
html = await response.json()
return html
def get_raw(ucdp):
urls = get_pages(ucdp)
session = aiohttp.ClientSession()
tasks = [asyncio.ensure_future(call(url, session)) for url in urls]
loop = asyncio.get_event_loop()
responses = loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
return responses
def get_pages(ucdp):
if ucdp == "nonstate":
url_ucdp = 'http://ucdpapi.pcr.uu.se/api/nonstate/18.1?pagesize=1000&page=0'
elif ucdp == "dyadic":
url_ucdp = 'http://ucdpapi.pcr.uu.se/api/dyadic/18.1?pagesize=1000&page=0'
elif ucdp == "ucdp":
url_ucdp = 'http://ucdpapi.pcr.uu.se/api/ucdpprioconflict/18.1?pagesize=1000&page=0'
elif ucdp == "onesided":
url_ucdp = 'http://ucdpapi.pcr.uu.se/api/onesided/18.1?pagesize=1000&page=0'
elif ucdp == "battledeaths":
url_ucdp = 'http://ucdpapi.pcr.uu.se/api/battledeaths/18.1?pagesize=1000&page=0'
elif ucdp == "gedevents":
url_ucdp = 'http://ucdpapi.pcr.uu.se/api/gedevents/18.1?pagesize=1000&page=0'
urlbase = url_ucdp[:-1]
r = requests.get(url_ucdp).json()
pages = range(r['TotalPages'])
urls = []
for i in pages:
x = urlbase + str(i)
urls.append(x)
return urls
def get_ucdp(ucdp):
"""
Version = 18.1
Options for ucdp:
"nonstate" = Nonstate conflict dataset
"dyadic" = Dyadic conflict dataset
"ucdp" = Full UCDP data set
"onesided" = Onesided conflict data sets
"battledeaths" = Battedeaths data set
"gedevents" = The geo-located events data set
"""
raw = get_raw(ucdp)
dict_temp = {}
full = []
for i in raw:
dict_temp.update(i)
df_temp = | pd.DataFrame(dict_temp['Result']) | pandas.DataFrame |
# SurEmCo - Main file
import time
import json
import os.path
import traceback
from argparse import ArgumentParser
from yaval import Visualizer, Values, VispyPlugin
from yaval.qt import QFileDialog
import cv2
import numpy as np
import pandas as pd
from vispy.scene import visuals
from vispy.visuals.transforms import STTransform
from .io import is_image_file, load_dataset, prepare_dataset
from .misc import Cell, to_rgb8, binarize_image, binarization_to_contours, get_subset_and_snippet, num_tokenize, \
contour_to_mesh
try:
from .tracker import Tracker
except ImportError:
print("WARNING: Custom tracker not available. Was it installed correctly?")
Tracker = None
try:
import trackpy
except ImportError:
print("WARNING: TrackPy not installed.")
trackpy = None
def create_argparser():
parser = ArgumentParser(description="SurEmCo - Superresolution Emitter Counter")
parser.add_argument("files", metavar="files", type=str, nargs='*', default=[],
help="input files, one must be a DIA image")
parser.add_argument("--disable-detection", dest="disable_detection", action='store_true')
parser.add_argument("--drift-correction", dest="drift_correction", action='store_true')
parser.add_argument("--show-unassigned", dest="show_unassigned", action='store_true')
parser.add_argument("--add-cell-border", dest="border", type=float, default=0.0)
parser.add_argument("--calibration", dest="calibration", type=float, default=0.065, help="µm per pixel")
parser.add_argument("--keep-order", dest="keep_order", action='store_true')
parser.add_argument("--debug", dest="debug", action='store_true', default=False)
parser.add_argument("--process", dest="process", action='store_true')
parser.add_argument("--parameters", dest="parameters", default=None)
parser.add_argument("--output", dest="output", default=None)
return parser
class SurEmCo(Visualizer):
title = "SurEmCo - Superresolution Emitter Counter – " + \
"by ModSim Group/IBG-1/<NAME>"
result_table = True
def visualization(self):
parser = create_argparser()
args = parser.parse_args()
if len(args.files) < 2:
args.files, _ = QFileDialog().getOpenFileNames()
if len(args.files) == 1:
while True:
new_fnames, _ = QFileDialog().getOpenFileNames()
if len(new_fnames) == 0:
break
else:
args.files += new_fnames
average_file, tabular_files = None, []
for filename in args.files:
if is_image_file(filename=filename):
average_file = filename
else:
tabular_files.append(filename)
if average_file is None or tabular_files is []:
raise SystemExit
image = cv2.imread(average_file, -1)
if len(image.shape) == 3:
image = image.mean(axis=2)
datasets = []
maximum_frame = 0
for tabular_file in (tabular_files if args.keep_order else sorted(tabular_files, key=num_tokenize)):
print("Reading %s" % (tabular_file,))
local_data = load_dataset(tabular_file)
local_data = prepare_dataset(local_data)
local_data.frame += maximum_frame
datasets.append(local_data)
maximum_frame = local_data.frame.max() + 1
data = | pd.concat(datasets) | pandas.concat |
from pandas import DataFrame
import pandas as pd
import numpy as np
import numpy
def method(arr):
    index = 0
ag = arr
for r in arr:
if not (str(r).replace(" ","")== ""):
temp = "$" + str(ag[index])
ag[index] = temp
if r is None:
ag[index] = " "
index+=1
return ag
def has(string,arr):
for x in arr:
if (str(x).lower() in str(string).lower()):
return True
return False
def getNum(string,arr):
i = 0
for x in arr:
if (str(x).lower() in str(string).lower()):
return i
i += 1
return 0
def repl(arr):
hhh = 0
for g in arr:
if g is None or g == "-" or g=="nan":
arr[hhh] = " "
hhh += 1
return arr
def appen(arr,num):
    light = arr
    for i in range(0, num):
        # numpy.append returns a new array, so reassign it or the padding is lost
        light = numpy.append(light, [" "])
    return light
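# Illustrative sketch (added, not part of the original script; values are
# placeholders): `method` prefixes non-empty entries with "$", while `has` and
# `getNum` do case-insensitive substring matching against a list of names.
def _example_helpers():
    print(method(["1000", " ", None]))
    print(has("University of Somewhere", ["somewhere", "elsewhere"]))
    print(getNum("University of Somewhere", ["elsewhere", "somewhere"]))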
writer1 = pd.ExcelFile("publicColleges+tuitions.xlsx")
df = writer1.parse("Sheet1")
arr = df.as_matrix()
index = 0
A = arr[:, 0]
puCOST_IN = arr[:, 1]
puCOST_OUT = arr[:, 2]
puLIVE = repl(arr[:, 3])
for s in A:
A[index] = A[index].replace("-"," ")
index += 1
reader1 = pd.ExcelFile("collegeGridLoans.xlsx")
df2 = reader1.parse("Sheet1")
arr2 = df2.as_matrix()
index = 0
B = arr2[:, 0] #big grid college names
writer2 = pd.ExcelFile("privateColleges+tuitions.xlsx")
df3 = writer2.parse("Sheet1")
arr3 = df3.as_matrix()
index = 0
G = arr3[:, 0]
prCOST_IN = arr3[:, 1]
prCOST_OUT = arr3[:, 2]
prLIVE = repl(arr3[:, 3])  # use the private-college matrix (arr3), not the public one
#Big grid college names: B
#public college names: A
#private college names: G
privateCostsIN = [None] * len(B)
publicCostsIN = [None] * len(B)
privateCostsOUT = [None] * len(B)
publicCostsOUT = [None] * len(B)
privateLive = [None] * len(B)
publicLive = [None] * len(B)
both_live = [None] * len(B)
both_costIN = [None] * len(B)
both_costOUT = [None] * len(B)
pubPriv = [None] * len(B)
print(prLIVE)
for streamline in A:
if has(streamline,B):
e = getNum(streamline,B)
r = getNum(streamline,A)
pubPriv[e] = "Public"
publicCostsIN[e] = puCOST_IN[r]
publicCostsOUT[e] = puCOST_OUT[r]
both_costOUT[e] = puCOST_OUT[r]
both_costIN[e] = puCOST_IN[r]
if r < 701:
publicLive[e] = puLIVE[r]
both_live[e] = puLIVE[r]
for streamline in G:
if has(streamline, B):
e = getNum(streamline, B)
r = getNum(streamline, G)
if 0 <= e and e < len(B) and 0 <= r and r < len(B):
pubPriv[e] = "Private"
privateCostsIN[e] = prCOST_IN[r]
privateCostsOUT[e] = prCOST_OUT[r]
both_costOUT[e] = prCOST_OUT[r]
both_costIN[e] = prCOST_IN[r]
if r < 701:
privateLive[e] = prLIVE[r]
both_live[e] = prLIVE[r]
pubPriv = repl(pubPriv)
both_costIN = repl(both_costIN)
both_costOUT = repl(both_costOUT)
both_costIN = method(both_costIN)
both_live = method(both_live)
both_costOUT = method(both_costOUT)
df2['On-Campus Tuition'] = both_costIN
df2['Off-Campus Tuition'] = both_costOUT
df2['Housing'] = both_live
df2.loc[:, 10] = pd.Series(pubPriv, index=df2.index)
df = | pd.DataFrame({'Public/Private': pubPriv, 'On-campus Tuition': both_costIN, 'Off-Campus Tuition': both_costOUT, 'Housing': both_live}) | pandas.DataFrame |
import pandas as pd
import numpy as np
# read in the broker member position-holding data
long_count=pd.read_csv("long_count.csv",index_col=0)
short_count=pd.read_csv("short_count.csv",index_col=0)
long_count=long_count.set_index(['trading_date','underlying_symbol'])
short_count=short_count.set_index(['trading_date','underlying_symbol'])
# join the two tables together
df_count=long_count.join(short_count,how='outer',rsuffix='short')
## factor group: position change rates
# hp1: net long position change rate
# define a function to compute the net long position change rate
def cal_hp1(df):
    # create a new dataframe to return
df_return=pd.DataFrame(index=df.index)
for R in [1,2,3,4,5,10,20]:
for rank in [5,10,20]:
print(str(R)+str(rank))
single=(df['top'+str(rank)]-df['top'+str(rank)+'short'])/(df['top'+str(rank)]-df['top'+str(rank)+'short']).shift(R)-1
single=pd.DataFrame(single)
single.columns=['R'+str(R)+'top'+str(rank)]
df_return=df_return.join(single)
df_return=df_return.shift(1)
return df_return
hp1=df_count.groupby(level=1).apply(cal_hp1)
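# Illustrative sketch (added to clarify the factor construction; the numbers are
# made up): cal_hp1 expects a frame indexed by (trading_date, underlying_symbol)
# whose columns are the topN long positions and topN short positions (suffixed
# "short"), and returns the net-long change rates, lagged by one day via the
# final shift(1).
def _example_cal_hp1():
    idx = pd.MultiIndex.from_product(
        [pd.date_range("2020-01-01", periods=3), ["RB"]],
        names=["trading_date", "underlying_symbol"],
    )
    toy = pd.DataFrame(
        {"top5": [100, 110, 120], "top5short": [90, 95, 100],
         "top10": [200, 210, 220], "top10short": [180, 190, 200],
         "top20": [300, 310, 320], "top20short": [280, 290, 300]},
        index=idx,
    )
    return toy.groupby(level=1).apply(cal_hp1)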
# hp2: long position change rate
def cal_hp2(df):
df_return=pd.DataFrame(index=df.index)
for R in [1,2,3,4,5,10,20]:
for rank in [5,10,20]:
single=df['top'+str(rank)]/df['top'+str(rank)].shift(R)-1
single=pd.DataFrame(single)
single.columns=['R'+str(R)+'top'+str(rank)]
df_return=df_return.join(single)
df_return = df_return.shift(1)
return df_return
hp2=df_count.groupby(level=1).apply(cal_hp2)
# hp3: short position change rate
def cal_hp3(df):
df_return=pd.DataFrame(index=df.index)
for R in [1,2,3,4,5,10,20]:
for rank in [5,10,20]:
single=1-df['top'+str(rank)+'short']/df['top'+str(rank)+'short'].shift(R)
single=pd.DataFrame(single)
single.columns=['R'+str(R)+'top'+str(rank)]
df_return=df_return.join(single)
df_return = df_return.shift(1)
return df_return
hp3=df_count.groupby(level=1).apply(cal_hp3)
## factor group: changes in position share
# hp4: change in long position share
def cal_hp4(df):
df_return = pd.DataFrame(index=df.index)
for R in [1, 2, 3, 4, 5, 10, 20]:
for rank in [5, 10, 20]:
single =(df['top'+str(rank)]/(df['top'+str(rank)]+df['top'+str(rank)+'short']))-(df['top'+str(rank)]/(df['top'+str(rank)]+df['top'+str(rank)+'short'])).shift(R)
single = pd.DataFrame(single)
single.columns = ['R' + str(R) + 'top' + str(rank)]
df_return = df_return.join(single)
df_return = df_return.shift(1)
return df_return
hp4=df_count.groupby(level=1).apply(cal_hp4)
# hp5: change in short position share
def cal_hp5(df):
df_return = | pd.DataFrame(index=df.index) | pandas.DataFrame |
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
df_all = pd.read_csv("/mnt/nadavrap-students/STS/data/imputed_data2.csv")
print(df_all.columns.tolist())
print (df_all.info())
df_all = df_all.replace({'MtOpD':{False:0, True:1}})
df_all = df_all.replace({'Complics':{False:0, True:1}})
mask_reop = df_all['Reoperation'] == 'Reoperation'
df_reop = df_all[mask_reop]
mask = df_all['surgyear'] == 2010
df_2010 = df_all[mask]
mask = df_all['surgyear'] == 2011
df_2011 = df_all[mask]
mask = df_all['surgyear'] == 2012
df_2012 = df_all[mask]
mask = df_all['surgyear'] == 2013
df_2013 = df_all[mask]
mask = df_all['surgyear'] == 2014
df_2014 = df_all[mask]
mask = df_all['surgyear'] == 2015
df_2015 = df_all[mask]
mask = df_all['surgyear'] == 2016
df_2016 = df_all[mask]
mask = df_all['surgyear'] == 2017
df_2017 = df_all[mask]
mask = df_all['surgyear'] == 2018
df_2018 = df_all[mask]
mask = df_all['surgyear'] == 2019
df_2019 = df_all[mask]
avg_hospid = pd.DataFrame()
def groupby_siteid():
df2010 = df_2010.groupby('HospID')['HospID'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('HospID')['HospID'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('HospID')['HospID'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('HospID')['HospID'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('HospID')['HospID'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('HospID')['HospID'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('HospID')['HospID'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('HospID')['HospID'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('HospID')['HospID'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('HospID')['HospID'].count().reset_index(name='2019_total')
df1 =pd.merge(df2010, df2011, on='HospID', how='outer')
df2 =pd.merge(df1, df2012, on='HospID', how='outer')
df3 =pd.merge(df2, df2013, on='HospID', how='outer')
df4 =pd.merge(df3, df2014, on='HospID', how='outer')
df5 =pd.merge(df4, df2015, on='HospID', how='outer')
df6 =pd.merge(df5, df2016, on='HospID', how='outer')
df7 =pd.merge(df6, df2017, on='HospID', how='outer')
df8 =pd.merge(df7, df2018, on='HospID', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='HospID', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['HospID'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['HospID','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("/tmp/pycharm_project_723/files/total op sum all years HospID.csv")
# print("details on site id dist:")
# # print("num of all sites: ", len(df_sum_all_Years))
#
# less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
# less_8.to_csv("total op less 10 years siteid.csv")
# print("num of sites with less years: ", len(less_8))
#
# x = np.array(less_8['Distinct_years'])
# print(np.unique(x))
avg_hospid['HospID'] = df_sum_all_Years['HospID']
avg_hospid['total_year_sum'] = df_sum_all_Years['Year_sum']
avg_hospid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_hospid['num_of_years'] = df_sum_all_Years['Distinct_years']
def groupby_siteid_reop():
df2010 = df_2010.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2019_reop')
df1 =pd.merge(df2010, df2011, on='HospID', how='outer')
df2 = | pd.merge(df1, df2012, on='HospID', how='outer') | pandas.merge |
import streamlit as st
import pickle
import pandas as pd
import requests
def fetch_poster(movie_id):
response = requests.get('https://api.themoviedb.org/3/movie/{'
'}?api_key=a4224711471f9fd28eadf55d571dbd08&language=en-US'.format(movie_id))
data = response.json()
return "https://image.tmdb.org/t/p/w500/"+data['poster_path']
def recommend(movie):
movie_index = movies[movies['title'] == movie].index[0]
# Here we are using enumerate function for not loose the index because if we sort the list our index will also suffle
distance = similarity[movie_index]
movies_list = sorted(list(enumerate(distance)), reverse=True, key=lambda x: x[1])[1:6]
recommended_movies = []
recommended_movies_posters = []
for i in movies_list:
movie_id = movies.iloc[i[0]].movie_id
recommended_movies.append(movies.iloc[i[0]].title)
recommended_movies_posters.append(fetch_poster(movie_id))
return recommended_movies, recommended_movies_posters
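# Illustrative sketch (added; assumes the pickled `movies` and `similarity`
# objects loaded below are available, and that the title passed in exists in
# the dataset — "Avatar" is only a hypothetical example).
def _example_recommend(title="Avatar"):
    names, posters = recommend(title)
    return list(zip(names, posters))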
movies_list = pickle.load(open('movies.pkl','rb'))
movies = | pd.DataFrame(movies_list) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": | Index([0, 2], dtype="int64") | pandas.Index |
#!/usr/bin/env python
# coding: utf-8
# Author : <NAME>
# Initial Date: Feb 21, 2021
# About: graph building contains the class(es) and related functions used to turn radar detections into graphs and to pull their data back out.
# Read associated README for full description
# License: MIT License
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ARIZONA BOARD OF REGENTS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
## general import for data manipulation, file gathering
import numpy as np
import matplotlib.pyplot as pt
import csv
import pandas as pd
import cantools
import matplotlib.animation as animation
from matplotlib import style
from haversine import haversine, Unit
import itertools
import math
import strym as s
import networkx as nx
import os
import glob
try:
dbc_location = '/home/ggrumm/strym/strym/dbc/toyota_rav4_hybrid.dbc'
db2 = s.initializeDBC_Cantools(dbc_location)
print('Loaded DBC from: {}'.format(dbc_location))
except:
print('make sure to import and/or locate your dbc file')
def setDBC(filePath):
'''Initialize the DBC from strym, give the filepath to your dbc file.'''
db2 = s.initializeDBC_Cantools(filePath)
def search_files(directory='.', extension=''):
'''Search for files below directory that contain the extension.'''
extension = extension.lower()
matches = []
for dirpath, dirnames, files in os.walk(directory):
for name in files:
if extension and name.lower().endswith(extension):
matches.append(os.path.join(dirpath, name))
elif not extension:
matches.append(os.path.join(dirpath, name))
return matches
def trackRadar(i,data):
'''Pull in all the relevant radar signals from a Toyota radar CAN data csv file.\
This function gets everything from a single "track" on a Toyota'''
lon = s.convertData(i,1,data,db2)
lat = s.convertData(i,2,data,db2)
lat = lat.reset_index(drop=True)
relv = s.convertData(i,4,data,db2)
relv=relv.reset_index(drop=True)
score = s.convertData(i+16,'SCORE',data,db2)
score = score.reset_index(drop=True)
lon = lon.rename(columns={"Message": "Longitude"})
lon = lon.reset_index(drop=True)
rel_accel = s.convertData(i+16,'REL_ACCEL',data,db2)
rel_accel = rel_accel.reset_index(drop=True)
lon['rel_accel'] = rel_accel.Message
lon['Latitude'] = lat.Message
lon['Relv'] = relv.Message
lon['Score'] = score.Message
lon['Track'] = i
lon1 = lon.loc[
lon.Longitude <= 330
]
return lon1
def allRadar(data):
'''With a CSV file as input, this function outputs the complete set of \
radar detections from a Toyota RAV4. Sorted by Time by default.'''
bigFrame = pd.DataFrame()
for i in range(384,400):
temp = trackRadar(i,data)
bigFrame = pd.concat([temp,bigFrame])
bigFrame = bigFrame.sort_values(by='Time')
return bigFrame
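# Illustrative usage sketch (added; the CSV path is hypothetical and the input
# is assumed to be the raw CAN dataframe read from the capture file): flatten
# all 16 radar tracks into one time-sorted frame of detections.
def _example_allRadar(csv_path="radar_capture.csv"):
    raw = pd.read_csv(csv_path)
    detections = allRadar(raw)
    return detections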
class Detection():
"""This class is to create an object for each radar detection in the data."""
newid = itertools.count()
beta_i = 0.02 #rate of false positives in sensor (arbitrary at the moment)
def __init__(self,datapoint):
self.x = datapoint.Longitude
self.y = datapoint.Latitude
self.rv = datapoint.Relv
self.a = datapoint.rel_accel
self.t = datapoint.Time
self.cost = math.log(self.beta_i/(1-self.beta_i),10)
self.id = Detection.newid.__next__()
self.score = datapoint.Score
self.track = datapoint.Track
# self.next = []
# self.prev = []
def getAll(self):
print('ID: ',self.id,'\nTime: ',self.t, '\nLong: ',self.x,'\nLat: ',self.y,'\nRel_Vel: ',
self.rv,'\nRel_Accel: ',self.a,'\nScore: ',self.score)
def costij(self,v):
dist = 5*np.sqrt((self.x-v.x)**2+2*(self.y-v.y)**2)
rvdist = 1000*abs(self.rv-v.rv)**2
extra = 0
if abs(self.x-v.x) > 10:
extra = 1000*abs(self.x-v.x)
return dist + rvdist + extra
    def add_arcij(self, v):
        # assignment, not comparison; G_arr is assumed to be a module-level adjacency matrix
        G_arr[self.id, v.id] = 1
    def rm_arcij(self, v):
        G_arr[self.id, v.id] = 0
class myGraph():
"""This class is to create graphs from CAN radar detections using NetworkX and the Detection class.
These graphs can be analyzed to track the objects being tracked in the radar data. Tip: remove invalid
radar values above 327m to improve speed. Larger datasets have increased time to finish."""
def __init__(self,datapoints,dt = 0.5):
self.datapoints = datapoints
self.G = nx.DiGraph()
self.G.add_node("s", demand=-1)
self.G.add_node("t", demand=1)
self.delta_t_max = dt
self.create()
def change_dt(self,newDT):
self.delta_t_max = newDT
self.create()
def get_dt(self):
return self.delta_t_max
def create(self):
self.G.clear()
count = -1
for i in self.datapoints.iterrows(): #for each datapoint
count +=1
v = Detection(i[1]) #make a detection object
self.G.add_node(v.id,obj=v) #add the node from the datapoint
self.G.add_edge('s',v.id,weight=0) #add the edge from s
self.G.add_edge(v.id,'t',weight=0) #add the edge from t
if count > 0: #if not first, add edges from older vertices
# print(v.id)
v = self.G.nodes[v.id]['obj']
# delta_t = 0
# print(v.id)
n = 1
u = self.G.nodes[v.id-n]['obj']
delta_t = u.t-v.t
while delta_t < self.delta_t_max:
# print(delta_t)
self.G.add_edge(v.id-n,v.id,weight=v.costij(u)+v.cost )
# (10*np.sqrt(abs(delta_t)))
#add the edge(s) from nearby detections
#cost is euclidean distance + log likelihood real detection + 10*sqrt(dt)
# if n < v.id:
n+=1
try:
u = self.G.nodes[v.id-n]['obj']
delta_t = v.t-u.t
except:
delta_t = 100
# elif n >= v.id-1:
#define a successive shortest path algorithm to extract the tracjectories
def SSP(H, s='s', t='t'):
"""Successive shortest path algorithm using Bellman-Ford. Robust to negative weights. Graph H, and the source and target
nodes of the flow network are needed as inputs. """
J = H.copy()
ax1 = pt.subplot(121)
# pt.ylim([0,180])
ax2 = pt.subplot(122)
# pt.ylim([0,180])
paths = []
path_cost = nx.shortest_path_length(J,s,t, weight='weight',method='bellman-ford')
# print(path_cost)
stop = False
while (len(J.nodes) > 2 and path_cost < 0 and stop == False): #find a shortest path between s and t until they have all been removed
# print('finding shortest path!')
# print(path_cost)
path_cost = nx.shortest_path_length(J,s,t, weight='weight',method='bellman-ford')
if path_cost < 0:
path = nx.shortest_path(J,s,t, weight='weight',method='bellman-ford')
if len(path)< 40:
stop = True
# print('found!')
# print(path_cost)
# print(path)
paths.append(path)
# mydf = getPath(path,J)
# if len(path) > 40:
# print(len(path),path_cost)
# # pt.figure()
# ax1.plot(mydf.y,mydf.x ,ls='',marker='.',markersize=3)
# ax2.plot(mydf.t,mydf.x,ls='',marker='.',markersize=3)
# # pt.figure()
for i in path:
# print(path)
# print()
if i != 't' and i != 's':
# print(H.nodes[i]['obj'].getAll())
# if G.nodes[i]['obj'].x >=320:
# print(G.nodes[i]['obj'].id,G.nodes[i]['obj'].t,G.nodes[i]['obj'].x,G.nodes[i]['obj'].y,G.nodes[i]['obj'].cost)
J.remove_node(i)
return paths
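# Illustrative usage sketch (added; the CSV path is hypothetical): build the
# detection graph from the flattened radar detections and pull out candidate
# trajectories with the successive shortest path routine above.
def _example_tracking(csv_path="radar_capture.csv"):
    detections = allRadar(pd.read_csv(csv_path))
    graph = myGraph(detections, dt=0.5)
    return SSP(graph.G, "s", "t")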
def getPath(path, J):
"""This function takes a path and NetworkX graph, and outputs a df of the relevant data."""
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import jellyfish
def find_closest_string(list_string, list_candidate, no_perfect_match=True):
list_df = []
for k in range(len(list_string)):
for c in range(len(list_candidate)):
match_name = list_string[k]
candidate = list_candidate[c]
if not pd.isna(match_name):
if not pd.isna(candidate):
if no_perfect_match:
if candidate == match_name:
continue
dist = jellyfish.levenshtein_distance(match_name, candidate)
list_df.append(
pd.DataFrame({'string' : match_name,
'candidate' : candidate,
'dist':dist}, index = [0]))
result = pd.concat(list_df)
result = result.sort_values("dist").groupby("candidate", as_index=False).first()
return(result)
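# Illustrative sketch (added; the place names are made-up examples): matching a
# list of noisy strings against candidate spellings with find_closest_string.
# no_perfect_match=False also keeps exact matches.
def _example_find_closest_string():
    noisy = ["nouakchot", "kiffa "]
    candidates = ["nouakchott", "kiffa", "rosso"]
    return find_closest_string(noisy, candidates, no_perfect_match=False)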
def clean_moughataa_col(df):
mg_list = find_closest_string(df.moughataa.unique(), df.moughataa.unique())
mg_list = mg_list[mg_list['dist'] == 1].reset_index()
"""
for r in range(len(mg_list)):
try:
s = mg_list.loc[r, 'string']
mg_list = mg_list[~(mg_list['candidate']==s)]
except:
pass
#mg_list.loc[mg_list['candidate'] == 'teyarett', 'string'] = 'teyarett'
try:
mg_list = mg_list.reset_index(drop=False)
except:
pass
"""
mough = pd.DataFrame({'moughataa':df.moughataa.unique()})
for s in range(len(mg_list.index)):
string = mg_list.loc[s, 'string']
string_replacing = mg_list.loc[s, 'candidate']
mough.loc[mough['moughataa'] == string, 'moughataa2'] = string_replacing
for m in range(len(mough.index)):
if | pd.isna(mough.loc[m, 'moughataa2']) | pandas.isna |
from .base import Transformer
import pandas as pd
import numpy as np
import os
ISO_COUNTRY_CODES = os.path.join(os.path.dirname(__file__), 'countrycodes.csv')
class UCDPTransformer(Transformer):
""" Data source specific transformers """
def __init__(self, source, target):
super().__init__(source, target)
self.iso = pd.read_csv(ISO_COUNTRY_CODES,
usecols=[0, 2],
names=['name', 'iso3'],
header=0)
def read(self):
try:
self.ucdp_df = pd.read_csv(self.source[0])
except FileNotFoundError as exc:
raise ValueError("Source file {} not found.".format(self.source)) \
from exc
def write(self):
self.df.to_csv(self.target, mode='w', index=False)
def transform(self):
# self.transform_forcibly_displaced_populations()
self.transform_ucdp()
self.transform_country_code()
def __repr__(self):
return "<UCDPTransformer data for {}-{} ({} rows)>".format(self.df['year'].min(),
self.df['year'].max(),
len(self.df))
def transform_ucdp(self):
events_df = self.ucdp_df.groupby(['year', 'country'])['best'].agg(['size'])
events_df.reset_index(inplace=True)
unique_states = events_df.drop_duplicates("country")[["country"]]
unique_states["key"] = 1
unique_years = events_df.drop_duplicates("year")[["year"]]
unique_years["key"] = 1
states_years_df = pd.merge(unique_states, unique_years, on = "key").drop("key",axis=1)
events_df = pd.merge(states_years_df, events_df, how = "left", on = ["country","year"]).fillna(0)
events_df.columns.values[2] = 'value'
subevents_df = self.ucdp_df.groupby(['year', 'country', 'type_of_violence'])['best'].agg(['size'])
subevents_df.reset_index(inplace=True)
state_based_df = subevents_df[subevents_df['type_of_violence'] == 1][['year', 'country', 'size']]
state_based_df = pd.merge(states_years_df, state_based_df, how = "left", on = ["country","year"]).fillna(0)
state_based_df.columns.values[2] = 'value'
nonstate_df = subevents_df[subevents_df['type_of_violence'] == 2][['year', 'country', 'size']]
nonstate_df = pd.merge(states_years_df, nonstate_df, how = "left", on = ["country","year"]).fillna(0)
nonstate_df.columns.values[2] = 'value'
one_sided_df = subevents_df[subevents_df['type_of_violence'] == 3][['year', 'country', 'size']]
one_sided_df = pd.merge(states_years_df, one_sided_df, how = "left", on = ["country","year"]).fillna(0)
one_sided_df.columns.values[2] = 'value'
rakhine_df = self.ucdp_df[self.ucdp_df['adm_1'] == "Rakhine State"]
myanmar_df = rakhine_df.groupby(['year'])['best'].agg(['size'])
myanmar_df = pd.merge(unique_years, myanmar_df, how = "left", on = ["year"]).fillna(0).drop("key", axis=1)
myanmar_df.columns.values[1] = 'value'
myanmar_df["country"] = "Myanmar"
events_df.loc[:, "Indicator Code"] = "UC.EVT.TOT"
events_df.loc[:, "Indicator Name"] = "Number of conflict events per year"
state_based_df.loc[:, "Indicator Code"] = "UC.EVT.STA"
state_based_df.loc[:, "Indicator Name"] = "Number of state-based conflict events per year"
nonstate_df.loc[:, "Indicator Code"] = "UC.EVT.NON"
nonstate_df.loc[:, "Indicator Name"] = "Number of non-state conflict events per year"
one_sided_df.loc[:, "Indicator Code"] = "UC.EVT.ONE"
one_sided_df.loc[:, "Indicator Name"] = "Number of one-sided conflict events per year"
myanmar_df.loc[:, "Indicator Code"] = "UC.EVT.RAKH"
myanmar_df.loc[:, "Indicator Name"] = "Number of conflict events in Rakhine State per year"
events_fatal = self.ucdp_df.groupby(['year', 'country'])['best'].agg(['sum'])
events_fatal.reset_index(inplace=True)
events_fatal = pd.merge(states_years_df, events_fatal, how = "left", on = ["country","year"]).fillna(0)
events_fatal.columns.values[2] = 'value'
civil_fatal = self.ucdp_df.groupby(['year', 'country'])['deaths_civilians'].agg(['sum'])
civil_fatal.reset_index(inplace=True)
civil_fatal = pd.merge(states_years_df, civil_fatal, how = "left", on = ["country","year"]).fillna(0)
civil_fatal.columns.values[2] = 'value'
subevents_fatal = self.ucdp_df.groupby(['year', 'country', 'type_of_violence'])['best'].agg(['sum'])
subevents_fatal.reset_index(inplace=True)
        state_based_fatal = subevents_fatal[subevents_fatal['type_of_violence'] == 1][['year', 'country', 'sum']]
state_based_fatal = pd.merge(states_years_df, state_based_fatal, how = "left", on = ["country","year"]).fillna(0)
state_based_fatal.columns.values[2] = 'value'
        nonstate_fatal = subevents_fatal[subevents_fatal['type_of_violence'] == 2][['year', 'country', 'sum']]
nonstate_fatal = pd.merge(states_years_df, nonstate_fatal, how = "left", on = ["country","year"]).fillna(0)
nonstate_fatal.columns.values[2] = 'value'
        one_sided_fatal = subevents_fatal[subevents_fatal['type_of_violence'] == 3][['year', 'country', 'sum']]
one_sided_fatal = pd.merge(states_years_df, one_sided_fatal, how = "left", on = ["country","year"]).fillna(0)
one_sided_fatal.columns.values[2] = 'value'
rakhine_fatal = self.ucdp_df[self.ucdp_df['adm_1'] == "Rakhine State"]
myanmar_fatal = rakhine_fatal.groupby(['year'])['best'].agg(['sum'])
myanmar_fatal = pd.merge(unique_years, myanmar_fatal, how = "left", on = ["year"]).fillna(0).drop("key", axis=1)
myanmar_fatal.columns.values[1] = 'value'
myanmar_fatal["country"] = "Myanmar"
events_fatal.loc[:, "Indicator Code"] = "UC.FAT.TOT"
events_fatal.loc[:, "Indicator Name"] = "Fatalities from conflict events per year"
civil_fatal.loc[:, "Indicator Code"] = "UC.FAT.CIV"
civil_fatal.loc[:, "Indicator Name"] = "Civilian fatalities from conflict events per year"
state_based_fatal.loc[:, "Indicator Code"] = "UC.FAT.STA"
state_based_fatal.loc[:, "Indicator Name"] = "Fatalities from state-based conflict events per year"
nonstate_fatal.loc[:, "Indicator Code"] = "UC.FAT.NON"
nonstate_fatal.loc[:, "Indicator Name"] = "Fatalities from non-state conflict events per year"
one_sided_fatal.loc[:, "Indicator Code"] = "UC.FAT.ONE"
one_sided_fatal.loc[:, "Indicator Name"] = "Fatalities from one-sided conflict events per year"
myanmar_fatal.loc[:, "Indicator Code"] = "UC.FAT.RAKH"
myanmar_fatal.loc[:, "Indicator Name"] = "Number of fatalities in Rakhine State per year"
self.ucdp_df = pd.concat([events_df, state_based_df, nonstate_df, one_sided_df, myanmar_df, events_fatal, state_based_fatal, nonstate_fatal, one_sided_fatal, civil_fatal, myanmar_fatal], sort=True)
self.ucdp_df = self.ucdp_df.dropna(how='any', axis=0)
self.df = self.ucdp_df
def transform_country_code(self):
# map UCDP country names onto the ISO-standard names used by the ISO reference table
country_map = {
    "United States": "United States of America",
    "Trinidad & Tobago": "Trinidad and Tobago",
    "Trinidad": "Trinidad and Tobago",
    "Venezuela": "Venezuela (Bolivarian Republic of)",
    "Bolivia": "Bolivia (Plurinational State of)",
    "United Kingdom": "United Kingdom of Great Britain and Northern Ireland",
    "Germany West": "Germany",
    "Germany East": "Germany",
    "Czechoslovakia": "Czechia",
    "Czech Republic": "Czechia",
    "Slovak Republic": "Slovakia",
    "Macedonia": "Macedonia (the former Yugoslav Republic of)",
    "Bosnia": "Bosnia and Herzegovina",
    "Serbia and Montenegro": "Serbia",
    "Kosovo": "Serbia",
    "Moldova": "Moldova (Republic of)",
    "Russia": "Russian Federation",
    "USSR (Soviet Union)": "Russian Federation",
    "Ivory Coast": "Côte d'Ivoire",
    "Congo-Brazzaville": "Congo",
    "Congo Brazzaville": "Congo",
    "Congo-Kinshasa": "Congo (Democratic Republic of the)",
    "Congo Kinshasa": "Congo (Democratic Republic of the)",
    "Tanzania": "Tanzania, United Republic of",
    "Iran": "Iran (Islamic Republic of)",
    "Syria": "Syrian Arab Republic",
    "Palestine": "Palestine, State of",
    "Yemen Arab Republic": "Yemen",
    "Yemen PDR": "Yemen",
    "Yemen North": "Yemen",
    "Yemen South": "Yemen",
    "Taiwan": "Taiwan, Province of China",
    "Korea North": "Korea (Democratic People's Republic of)",
    "North Korea": "Korea (Democratic People's Republic of)",
    "Korea South": "Korea (Republic of)",
    "South Korea": "Korea (Republic of)",
    "Myanmar (Burma)": "Myanmar",
    "Laos": "Lao People's Democratic Republic",
    "Vietnam": "Viet Nam",
    "Vietnam North": "Viet Nam",
    "Vietnam South": "Viet Nam",
    "Viet Name": "Viet Nam",
    "East Timor": "Timor-Leste",
    "Cape Verde": "Cabo Verde",
    "Central African Repub": "Central African Republic",
    "Guinea Bissau": "Guinea-Bissau",
    "(North) Sudan": "Sudan",
}
# "Yugoslavia", "Former Yugoslavia", "Former USSR", "Eastern Europe" and "Latin America"
# have no ISO equivalent and are intentionally left unchanged.
self.df.loc[:, "country"] = self.df.loc[:, "country"].replace(country_map)
self.df = | pd.merge(self.df, self.iso, how='left', left_on='country', right_on='name') | pandas.merge |
"""
Execute a partition strategy using multiple Gretel jobs.
This module will consume a dataset and create a partitioning strategy. This strategy will be saved
to a JSON file. Within each partition there is a ``ctx`` object that will contain metadata about the
various models and handlers used. The context object has the following shape:
{
"artifact": "artifact used to train model",
"status": "one of Gretel's job statuses",
"model_id": "the model ID",
"attempt": "number of times the model has attempted training",
"sqs": "an object that contains SQS information from Gretel's API",
"last_handler": {
"artifact": "any seed data that was used for generation",
"attempt": "number of tries to generate",
"status": "a Gretel job status",
"handler_id": "the record handler ID",
"num_records": "how many records were generated, automatically overloaded if seeds are provided"
}
}
"""
from __future__ import annotations
import json
import logging
import tempfile
import time
from collections import Counter
from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, wait
from copy import deepcopy
from dataclasses import dataclass
from functools import wraps
from pathlib import Path
from typing import List, Optional, Union
import pandas as pd
import smart_open
from gretel_client.projects import Project
from gretel_client.projects.jobs import ACTIVE_STATES
from gretel_client.projects.models import Model, Status
from gretel_client.projects.records import RecordHandler
from gretel_client.rest import ApiException
from gretel_client.users.users import get_me
from gretel_trainer.strategy import (Partition, PartitionConstraints,
PartitionStrategy)
MODEL_ID = "model_id"
HANDLER_ID = "handler_id"
STATUS = "status"
ARTIFACT = "artifact"
SQS = "sqs"
ATTEMPT = "attempt"
HANDLER = "last_handler"
NUM_RECS = "num_records"
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
@dataclass
class ArtifactResult:
id: str
record_count: int
@dataclass
class GenPayload:
num_records: int
seed_df: Optional[pd.DataFrame] = None
seed_artifact_id: Optional[str] = None
max_invalid: Optional[int] = None
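# Decorator that lazily hydrates the runner (loads or creates the partition strategy) before the wrapped method runs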
def _needs_load(func):
@wraps(func)
def wrapper(inst: StrategyRunner, *args, **kwargs):
if not inst._loaded:
inst.load()
return func(inst, *args, **kwargs)
return wrapper
@dataclass
class RemoteDFPayload:
partition: int
slot: int
job_type: str
uid: str
handler_uid: str
project: Project
artifact_type: str
df: pd.DataFrame = None
def _remote_dataframe_fetcher(payload: RemoteDFPayload) -> RemoteDFPayload:
# We need the model object no matter what
model = Model(payload.project, model_id=payload.uid)
job = model
# if we are downloading handler data, we reset our job
# to the specific handler object
if payload.job_type == "run":
job = RecordHandler(model, record_id=payload.handler_uid)
download_url = job.get_artifact_link(payload.artifact_type)
payload.df = pd.read_csv(download_url, compression="gzip")
return payload
def _maybe_submit_job(
job: Union[Model, RecordHandler]
) -> Optional[Union[Model, RecordHandler]]:
try:
job = job.submit_cloud()
except ApiException as err:
if "Maximum number of" in str(err):
logger.warning(
"Rate limiting: Max jobs created, skipping new job for now..."
)
return None
return job
class StrategyRunner:
_df: pd.DataFrame
_cache_file: Path
_constraints: PartitionConstraints
_strategy: PartitionStrategy
_model_config: dict
_max_jobs_active: int
_project: Project
_loaded: bool
_artifacts: List[str]
_cache_overwrite: bool
_max_artifacts: int = 25
_status_counter: Counter
_handler_status_counter: Counter
_error_retry_limit: int
strategy_id: str
def __init__(
self,
*,
strategy_id: str,
df: pd.DataFrame,
cache_file: Union[str, Path],
cache_overwrite: bool = False,
model_config: dict,
partition_constraints: PartitionConstraints,
project: Project,
error_retry_limit: int = 3,
):
self._df = df
self._cache_file = Path(cache_file)
self._constraints = partition_constraints
self._model_config = model_config
self._project = project
self._loaded = False
self._cache_overwrite = cache_overwrite
self._artifacts = []
self.strategy_id = strategy_id
self._status_counter = Counter()
self._error_retry_limit = error_retry_limit
def load(self):
"""Hydrate the instance before we can start
doing work, must be called after init
"""
if self._loaded:
return
self._refresh_max_job_capacity()
# If the cache file exists, we'll try and load an existing
# strategy. If not, we'll create a new strategy with the
# provided constraints.
if self._cache_file.exists() and not self._cache_overwrite:
self._strategy = PartitionStrategy.from_disk(self._cache_file)
else:
self._strategy = PartitionStrategy.from_dataframe(
self.strategy_id, self._df, self._constraints
)
self._strategy.save_to(self._cache_file, overwrite=True)
self._loaded = True
@classmethod
def from_completed(
cls, project: Project, cache_file: Union[str, Path]
) -> StrategyRunner:
cache_file = Path(cache_file)
if not cache_file.exists():
raise ValueError("cache file does not exist")
inst = cls(
strategy_id="__none__",
df=None,
cache_file=cache_file,
model_config=None,
partition_constraints=None,
project=project,
)
inst.load()
return inst
def _update_job_status(self):
# Get all jobs that have been created, we can do this
# by just searching for any partitions that have a "model_id"
# set
partitions = self._strategy.query_glob(MODEL_ID, "*")
# logger.info(f"Fetching updates for {len(partitions)} models...")
self._status_counter = Counter()
for partition in partitions:
last_status = partition.ctx.get(STATUS)
# If we are done successfully, go on to the next one
if last_status in (Status.COMPLETED,):
self._status_counter.update([Status(last_status)])
continue
model_id = partition.ctx.get(MODEL_ID)
# Hydrate a Model object from the remote API
current_model = Model(self._project, model_id=model_id)
# Update our strategy-wide counter of model states
self._status_counter.update([current_model.status])
# Did our model status change?
if last_status != current_model.status:
logger.info(
f"Partition {partition.idx} status change from {last_status} to {current_model.status}"
)
_update = {STATUS: current_model.status}
if current_model.status == Status.COMPLETED:
report = current_model.peek_report()
if report is None:
with smart_open.open(
current_model.get_artifact_link("report_json")
) as fin:
report = json.loads(fin.read())
sqs = report["synthetic_data_quality_score"]["score"]
label = "Moderate"
if sqs >= 80:
label = "Excellent"
elif sqs >= 60:
label = "Good"
if last_status != current_model.status:
logger.info(
f"Partition {partition.idx} completes with SQS: {label} ({sqs})"
)
_update.update({SQS: report})
partition.update_ctx(_update)
self._strategy.status_counter = dict(self._status_counter)
# Aggressive, but save after every update
self._strategy.status_counter = dict(self._status_counter)
self._strategy.save()
# If every partition is done, we may not have saved the strategy
self._strategy.status_counter = dict(self._status_counter)
self._strategy.save()
def _update_handler_status(self):
partitions = self._strategy.partitions
self._handler_status_counter = Counter()
for partition in partitions:
model_id = partition.ctx.get(MODEL_ID)
handler_id = partition.ctx.get(HANDLER, {}).get(HANDLER_ID)
last_status = partition.ctx.get(HANDLER, {}).get(STATUS)
# No need to refresh completed handlers from the remote API
if last_status in (Status.COMPLETED,):
self._handler_status_counter.update([Status(last_status)])
continue
if not handler_id:
continue
# Hydrate a Model and Handler object from the remote API
current_model = Model(self._project, model_id=model_id)
current_handler = RecordHandler(
model=current_model, record_id=handler_id)
self._handler_status_counter.update([current_handler.status])
if last_status != current_handler.status:
logger.info(
f"Partition {partition.idx} record generation status change from {last_status} to {current_handler.status}"
)
partition.ctx[HANDLER][STATUS] = current_handler.status
self._strategy.save()
@_needs_load
def cancel_all(self):
partitions = self._strategy.query_glob(MODEL_ID, "*")
for partition in partitions:
model_id = partition.ctx.get(MODEL_ID)
# Hydrate a Model object from the remote API
current_model = Model(self._project, model_id=model_id)
logger.warning(f"Cancelling: {current_model.id}")
current_model.cancel()
def _refresh_max_job_capacity(self):
self._max_jobs_active = get_me()["service_limits"]["max_jobs_active"]
@property
@_needs_load
def has_capacity(self) -> bool:
num_active = len(self._gather_statuses(ACTIVE_STATES))
self._refresh_max_job_capacity()
return num_active < self._max_jobs_active
def _remove_unused_artifact(self) -> Optional[str]:
project_artifacts = self._project.artifacts
curr_artifacts = set()
if len(project_artifacts) < self._max_artifacts:
return "__none__"
# First we try and remove an artifact from this strategy by
# looking at our model states
for p in self._strategy.partitions:
status = p.ctx.get(STATUS)
artifact_key = p.ctx.get(ARTIFACT)
curr_artifacts.add(artifact_key)
# We don't want to delete an artifact that is maybe
# about to be used
if status is None or status in ACTIVE_STATES:
continue
if artifact_key:
logger.debug(
f"Attempting to remove artifact: {p.ctx.get(ARTIFACT)}")
self._project.delete_artifact(artifact_key)
p.update_ctx({ARTIFACT: None})
self._strategy.save()
return artifact_key
# try and remove a seed artifact if one exists
handler_status = p.ctx.get(HANDLER, {}).get(STATUS)
handler_artifact_key = p.ctx.get(HANDLER, {}).get(ARTIFACT)
if handler_status is None or handler_status in ACTIVE_STATES:
continue
if handler_artifact_key:
logger.debug(
f"Attempting to remove handler artifact: {handler_artifact_key}"
)
self._project.delete_artifact(handler_artifact_key)
p.ctx[HANDLER][ARTIFACT] = None
return handler_artifact_key
# If we couldn't remove an artifact from this current strategy,
# we'll just remove some other random one
try:
logger.debug("Removing artifact not belonging to this Strategy...")
for art in project_artifacts:
key = art.get("key")
if key in curr_artifacts:
continue
self._project.delete_artifact(key)
return key
except Exception as err:
logger.warning(f"Could not delete artifact: {str(err)}")
return None
def _partition_to_artifact(self, partition: Partition) -> Optional[ArtifactResult]:
removed_artifact = self._remove_unused_artifact()
if not removed_artifact:
logger.debug(
"Could not make room for next data set, waiting for room...")
# We couldn't make room so we don't try and upload the next artifact
return None
filename = f"{self.strategy_id}-{partition.idx}.csv"
df_to_upload = partition.extract_df(self._df)
res = self._df_to_artifact(df_to_upload, filename)
partition.update_ctx({ARTIFACT: res.id})
self._strategy.save()
return res
def _df_to_artifact(self, df: pd.DataFrame, filename: str) -> ArtifactResult:
with tempfile.TemporaryDirectory() as tmp:
target_file = str(Path(tmp) / filename)
df.to_csv(target_file, index=False)
artifact_id = self._project.upload_artifact(target_file)
return ArtifactResult(id=artifact_id, record_count=len(df))
@_needs_load
def train_partition(
self, partition: Partition, artifact: ArtifactResult
) -> Optional[str]:
attempt = partition.ctx.get(ATTEMPT, 0) + 1
model_config = deepcopy(self._model_config)
data_source = None
if "synthetics" in model_config["models"][0].keys():
model_config["models"][0]["synthetics"]["generate"] = {
"num_records": artifact.record_count,
"max_invalid": None,
}
# If we're trying this model for a second+ time, we reduce the vocab size to
# utilize the char encoder in order to give a better chance and success
if attempt > 1:
model_config["models"][0]["synthetics"]["params"]["vocab_size"] = 0
# If this partition is for the first-N headers and we have known seed headers, we have to
# modify the configuration to account for the seed task.
if partition.columns.seed_headers:
model_config["models"][0]["synthetics"]["task"] = {
"type": "seed",
"attrs": {"fields": partition.columns.seed_headers},
}
elif "ctgan" in model_config["models"][0].keys():
pass
model = self._project.create_model_obj(
model_config=model_config, data_source=artifact.id
)
model = _maybe_submit_job(model)
if model is None:
return None
partition.ctx.update(
{
STATUS: model.status,
ARTIFACT: artifact.id,
MODEL_ID: model.model_id,
ATTEMPT: attempt,
}
)
self._strategy.save()
logger.info(
f"Started model: {model.print_obj['model_name']} " f"source: {artifact.id}"
)
return model.model_id
@_needs_load
def run_partition(
self, partition: Partition, gen_payload: GenPayload
) -> Optional[str]:
"""
Run a record handler for a model and return the job id.
NOTE: This assumes the partition is successfully trained and has an
available model.
"""
handler_dict = partition.ctx.get(HANDLER)
if handler_dict is None:
partition.ctx[HANDLER] = {}
attempt = partition.ctx.get(HANDLER).get(ATTEMPT, 0) + 1
model_id = partition.ctx.get(MODEL_ID)
# Hydrate our trained model so we can start the handler
model_obj = Model(self._project, model_id=model_id)
# Create and start our handler to generate data
handler_obj = model_obj.create_record_handler_obj(
data_source=gen_payload.seed_artifact_id,
params={
"num_records": gen_payload.num_records,
"max_invalid": gen_payload.max_invalid,
},
)
handler_obj = _maybe_submit_job(handler_obj)
if handler_obj is None:
return None
_ctx_update = {
ATTEMPT: attempt,
ARTIFACT: gen_payload.seed_artifact_id,
NUM_RECS: gen_payload.num_records,
STATUS: handler_obj.status,
HANDLER_ID: handler_obj.record_id,
}
partition.ctx[HANDLER].update(_ctx_update)
self._strategy.save()
logger.info(
f"Generating {gen_payload.num_records} records from model: {model_obj.print_obj['model_name']}"
)
return handler_obj.record_id
@_needs_load
def train_next_partition(self) -> Optional[str]:
start_job = False
for partition in self._strategy.partitions:
status = partition.ctx.get(STATUS) # type: Status
# If we've never done a job for this partition, we should start one
if status is None:
logger.info(
f"Partition {partition.idx} is new, starting model creation"
)
start_job = True
# If the job failed, should we try again?
elif (
status in (Status.ERROR, Status.LOST)
and partition.ctx.get(ATTEMPT, 0) < self._error_retry_limit
):
logger.info(
f"Partition {partition.idx} status: {status.value}, re-attempting job"
)
start_job = True
if start_job:
artifact = self._partition_to_artifact(partition)
if artifact is None:
return None
return self.train_partition(partition, artifact)
@_needs_load
def run_next_partition(self, gen_payload: GenPayload) -> Optional[str]:
start_job = False
for partition in self._strategy.partitions:
status = partition.ctx.get(HANDLER, {}).get(STATUS) # type: Status
attempt_count = partition.ctx.get(HANDLER, {}).get(ATTEMPT, 0)
if status is None:
logger.info(f"Generating data for partition {partition.idx}")
start_job = True
elif (
status in (Status.ERROR, Status.LOST)
and attempt_count < self._error_retry_limit
):
logger.info(
f"Partition {partition.idx} has status {status.value}, re-attempting generation"
)
start_job = True
if start_job:
use_seeds = False
# If this partition has seed fields and we were given seeds, we need to upload
# the artifact first.
if partition.columns.seed_headers and isinstance(
gen_payload.seed_df, pd.DataFrame
):
# NOTE(jm): If we've tried N-1 attempts with seeds and the handler has continued to
# fail, then we should stop using seeds so the handler at least has a chance to succeed.
# One example of this happening would be when a partition's model receives seeds
# where the values of the seeds were not in the training set (due to partitioning).
if attempt_count == self._error_retry_limit - 1:
logger.info(
f"WARNING: Disabling seeds for partition {partition.idx} due to previous failed generation attempts..."
)
else:
logger.info(
"Partition has seed fields, uploading seed artifact..."
)
use_seeds = True
removed_artifact = self._remove_unused_artifact()
if removed_artifact is None:
logger.info(
"Could not start generation with seeds, an old artifact could not be removed"
)
return None
filename = f"{self.strategy_id}-seeds-{partition.idx}.csv"
artifact = self._df_to_artifact(
gen_payload.seed_df, filename)
new_payload = GenPayload(
num_records=gen_payload.num_records,
max_invalid=gen_payload.max_invalid,
seed_artifact_id=artifact.id if use_seeds else None,
)
return self.run_partition(partition, new_payload)
@_needs_load
def clear_partition_runs(self):
"""
Partitions should only be trained until they are 'completed'; however, we can run
a partition any number of times. Before we do that, we want to go through and
clear any previous record-handler context from each partition.
"""
for partition in self._strategy.partitions:
partition.ctx[HANDLER] = {}
def _gather_statuses(self, statuses: List[Status]) -> List[Partition]:
out = []
for partition in self._strategy.partitions:
status = partition.ctx.get(STATUS)
if status is None:
continue
if status in statuses:
out.append(partition)
return out
@_needs_load
def is_done(self, *, handler: bool = False) -> bool:
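# A partition counts as done once it has completed or been cancelled, or has errored/been lost and exhausted its retry budget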
done = 0
for p in self._strategy.partitions:
if handler:
ctx_base = p.ctx.get(HANDLER, {})
else:
ctx_base = p.ctx
status = ctx_base.get(STATUS)
attempt = ctx_base.get(ATTEMPT, 0)
if status is None:
continue
if status in (Status.COMPLETED, Status.CANCELLED):
done += 1
elif (
status in (Status.ERROR, Status.LOST)
and attempt >= self._error_retry_limit
):
done += 1
return done == len(self._strategy.partitions)
@_needs_load
def train_all_partitions(self):
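# Polling loop: refresh model states, submit new training jobs while capacity allows, and stop once every partition is done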
logger.info(f"Processing {len(self._strategy.partitions)} partitions")
while True:
self._update_job_status()
if not self.has_capacity:
logger.debug("At active capacity, waiting for more...")
time.sleep(10)
continue
model_id = self.train_next_partition()
if model_id:
continue
if self.is_done():
break
time.sleep(10)
logger.info(dict(self._status_counter))
@_needs_load
def _get_synthetic_data(self, job_type: str, artifact_type: str) -> pd.DataFrame:
if job_type == "model":
self._update_job_status()
num_completed = self._status_counter.get(Status.COMPLETED, 0)
elif job_type == "run":
self._update_handler_status()
num_completed = self._handler_status_counter.get(
Status.COMPLETED, 0)
else:
raise ValueError("invalid job_type")
if num_completed != self._strategy.partition_count:
raise RuntimeError(
"Not all partitions are completed, cannot fetch synthetic data from trained models"
)
# We will have at least one column-wise DF, this holds
# one DF for each header cluster we have
df_chunks = {
i: | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from io import StringIO
import math
import jieba
import jieba.posseg as psg
from gensim import corpora, models
from jieba import analyse
import functools
from pymongo import MongoClient
import pandas as pd
import re
def get_stopword_list():
stop_word_path = './base/chinese_stopword.txt'
stopword_list = [sw.replace('\n', '')
for sw in open(stop_word_path, encoding='utf-8').readlines()]
return stopword_list
# Tokenization helper: segment a sentence, optionally with POS tags
def seg_to_list(sentence, pos):
if not pos:
# segmentation without POS tagging
seg_list = jieba.cut(sentence)
else:
# segmentation with POS tagging
seg_list = psg.cut(sentence)
return seg_list
# Filter out noise words: if pos is set, keep only nouns; then drop stopwords and words shorter than two characters.
def word_filter(seg_list, pos):
stopword_list = get_stopword_list()
filter_list = []
# decide whether to apply POS filtering based on the pos flag
# without POS filtering, label every word as a noun ('n') so nothing is dropped by the POS check
for seg in seg_list:
if not pos:
word = seg
flag = 'n'
else:
word = seg.word
flag = seg.flag
if not flag.startswith('n'):
continue
# drop stopwords and words shorter than two characters
if not word in stopword_list and len(word) > 1:
filter_list.append(word)
return filter_list
# Load and preprocess the corpus
def load_data(pos=True, corpus_path='./base/chinese_stopword.txt'):
doc_list = []
for line in open(corpus_path, 'r', encoding='utf-8'):
content = line.strip()
seg_list = seg_to_list(content, pos)
filter_list = word_filter(seg_list, pos)
doc_list.append(filter_list)
return doc_list
# Compute IDF statistics over the corpus
def train_idf(doc_list):
idf_dic = {}
# total number of documents
tt_count = len(doc_list)
# number of documents in which each word appears
for doc in doc_list:
for word in set(doc):
idf_dic[word] = idf_dic.get(word, 0.0) + 1.0
# convert counts to IDF values; add 1 to the denominator for smoothing
for k, v in idf_dic.items():
idf_dic[k] = math.log(tt_count / (1.0 + v))
# words missing from the dictionary are assumed to appear in only one document, giving a default IDF
default_idf = math.log(tt_count / (1.0))
return idf_dic, default_idf
# Comparator for top-K selection: sort by score, break ties by word order
def cmp(e1, e2):
import numpy as np
res = np.sign(e1[1] - e2[1])
if res != 0:
return res
else:
a = e1[0] + e2[0]
b = e2[0] + e1[0]
if a > b:
return 1
elif a == b:
return 0
else:
return -1
# TF-IDF keyword extractor
class TfIdf(object):
# trained IDF dictionary, default IDF value, preprocessed word list to extract from, number of keywords
def __init__(self, idf_dic, default_idf, word_list, keyword_num):
self.word_list = word_list
self.idf_dic, self.default_idf = idf_dic, default_idf
self.tf_dic = self.get_tf_dic()
self.keyword_num = keyword_num
# compute term frequencies
def get_tf_dic(self):
tf_dic = {}
for word in self.word_list:
tf_dic[word] = tf_dic.get(word, 0.0) + 1.0
tt_count = len(self.word_list)
for k, v in tf_dic.items():
tf_dic[k] = float(v) / tt_count
return tf_dic
# compute TF-IDF scores
def get_tfidf(self):
tfidf_dic = {}
for word in self.word_list:
idf = self.idf_dic.get(word, self.default_idf)
tf = self.tf_dic.get(word, 0)
tfidf = tf * idf
tfidf_dic[word] = tfidf
# sort by TF-IDF and keep the top keyword_num words as keywords
list_keywords = []
for k, v in sorted(tfidf_dic.items(), key=functools.cmp_to_key(cmp), reverse=True)[:self.keyword_num]:
list_keywords.append(k)
print(k + "/", end='')
print()
return list_keywords
# Topic-model-based keyword extractor (LSI/LDA)
class TopicModel(object):
#
def __init__(self, doc_list, keyword_num, model="LSI", num_topics=4):
# use the gensim interface to turn the documents into a vectorized representation
self.dictionary = corpora.Dictionary(doc_list)
# bag-of-words vectorization
corpus = [self.dictionary.doc2bow(doc) for doc in doc_list]
# weight each term by TF-IDF to obtain the weighted vector representation
self.tfidf_model = models.TfidfModel(corpus)
self.corpus_tfidf = self.tfidf_model[corpus]
self.keyword_num = keyword_num
self.num_topics = num_topics
# choose which topic model to train
if model == 'LSI':
self.model = self.train_lsi()
else:
self.model = self.train_lda()
# topic-word distribution of the corpus
word_dic = self.word_dictionary(doc_list)
self.wordtopic_dic = self.get_wordtopic(word_dic)
def train_lsi(self):
lsi = models.LsiModel(
self.corpus_tfidf, id2word=self.dictionary, num_topics=self.num_topics)
return lsi
def train_lda(self):
lda = models.LdaModel(
self.corpus_tfidf, id2word=self.dictionary, num_topics=self.num_topics)
return lda
def get_wordtopic(self, word_dic):
wordtopic_dic = {}
for word in word_dic:
single_list = [word]
wordcorpus = self.tfidf_model[self.dictionary.doc2bow(single_list)]
wordtopic = self.model[wordcorpus]
wordtopic_dic[word] = wordtopic
return wordtopic_dic
# Vocabulary construction and vectorization; a generic fallback when the gensim interface is not available
def word_dictionary(self, doc_list):
dictionary = []
for doc in doc_list:
dictionary.extend(doc)
dictionary = list(set(dictionary))
return dictionary
def doc2bowvec(self, word_list):
vec_list = [1 if word in word_list else 0 for word in self.dictionary]
return vec_list
# Compute the similarity between each word's topic distribution and the document's distribution; keep the keyword_num most similar words as keywords
def get_simword(self, word_list):
sentcorpus = self.tfidf_model[self.dictionary.doc2bow(word_list)]
senttopic = self.model[sentcorpus]
# cosine similarity
def calsim(l1, l2):
a, b, c = 0.0, 0.0, 0.0
for t1, t2 in zip(l1, l2):
x1 = t1[1]
x2 = t2[1]
a += x1 * x2
b += x1 * x1
c += x2 * x2
sim = a / math.sqrt(b * c) if not (b * c) == 0.0 else 0.0
return sim
# similarity between the input text and each word's topic distribution
sim_dic = {}
for k, v in self.wordtopic_dic.items():
if k not in word_list:
continue
sim = calsim(v, senttopic)
sim_dic[k] = sim
for k, v in sorted(sim_dic.items(), key=functools.cmp_to_key(cmp), reverse=True)[:self.keyword_num]:
print(k + "/ ", end='')
print()
def tfidf_extract(word_list, pos=False, keyword_num=10):
doc_list = load_data(pos)
idf_dic, default_idf = train_idf(doc_list)
tfidf_model = TfIdf(idf_dic, default_idf, word_list, keyword_num)
return tfidf_model.get_tfidf()
def textrank_extract(text, pos=False, keyword_num=10):
textrank = analyse.textrank
keywords = textrank(text, keyword_num)
# print the extracted keywords
for keyword in keywords:
print(keyword, end='/')
return keywords
# print()
def topic_extract(word_list, model, pos=False, keyword_num=10):
doc_list = load_data(pos)
topic_model = TopicModel(doc_list, keyword_num, model=model)
topic_model.get_simword(word_list)
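# A minimal usage sketch (illustrative): given a raw Chinese string ``text``, extract
# keywords with each of the three approaches defined above.
#
#   pos = True
#   seg_list = seg_to_list(text, pos)
#   filter_list = word_filter(seg_list, pos)
#   tfidf_extract(filter_list)          # TF-IDF ranking
#   textrank_extract(text)              # TextRank ranking
#   topic_extract(filter_list, 'LSI')   # topic-model similarity ranking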
class key_words:
def __init__(self):
self.client = MongoClient()  # connects to localhost:27017 by default
self.db = self.client.chatlog
self.post = self.db.vczh
def get_all_text(self):
word_list = []
for i in self.post.find({}, {'_id': 0, 'time': 1, 'text': 1, }):
i['text'] = i['text'][0]
word_list.append(i)
df = | pd.DataFrame(word_list) | pandas.DataFrame |
import pandas as pd
import math
import sys
import numpy as np
from process.validate_csv_data import check_yearly_records, check_range_records, mapping_id
def create_csv_military_militaryUnit(read_path, output_path, yearly_file, range_file):
"""
create military_军事.csv and militaryUnit_军事单位.csv that will be loaded into the database.
Process data in "Military, Politics and Management - Range0.csv" and "Military, Politics and Management - Yearly.csv" into "military_军事.csv" and "militaryUnit_军事单位.csv".
Categories for the military table are read from the "Database Data" directory.
Categories are predefined and created.
If categories are changed, military categories in Category.py should be changed first.
:param read_path: path to directory contains "Military, Politics and Management - Range0.csv", "Military, Politics and Management - Yearly.csv"
:param output_path: path to directory stores "military_军事.csv", "militaryUnit_军事单位.csv"
:param yearly_file: file contains military yearly data
:param range_file: file contains military range data
"""
yearly_df = pd.read_csv(read_path + "/" + yearly_file)
range_df = pd.read_csv(read_path + "/" + range_file)
yearly_df = yearly_df.dropna(axis=0, how='all')
range_df = range_df.dropna(axis=0, how='all')
yearly_df.drop_duplicates(inplace=True)
range_df.drop_duplicates(inplace=True)
# validate data
print("Validate militory Yearly data")
correct = check_yearly_records(yearly_df, range(1949, 2020))
print("Validate militory Range data")
correct = check_range_records(range_df)
if not correct:
sys.exit("Correct records first.")
else:
print("Finish validate.")
# create level1 and level2 category
for column in ['Division1', 'Division2']:
yearly_df[column] = yearly_df[column].where(yearly_df[column].notnull(), "")
range_df[column] = range_df[column].where(range_df[column].notnull(), "")
yearly_df['level1'] = yearly_df['Category']
yearly_df['level2'] = yearly_df['Division1'] + yearly_df['Division2']
range_df['level1'] = range_df['Category']
range_df['level2'] = range_df['Division1'] + range_df['Division2']
# add "Unit" column to the yearly_df
yearly_df["Unit"] = None # create a new empty column "Unit"
unit_temp = []
for row in yearly_df['level1'].values.tolist():
if row == "村民纠纷 Number of Civil Mediations":
unit_temp.append("解决件数 Number of Resolved Mediations")
elif row == "共产党员 CCP Membership":
unit_temp.append("人数 Number of People")
elif row == "阶级成分 Class Status":
unit_temp.append("户数 Number of Households")
elif row == "入伍 Military Enlistment":
unit_temp.append("人数 Number of People")
elif row == "新党员 New CCP Membership":
unit_temp.append("人数 Number of People")
elif row == "刑事案件 Number of Reported Crimes":
unit_temp.append("发生件数 Number of Cases Happened")
else: # set default unit to number of people
unit_temp.append("人数 Number of People")
yearly_df["Unit"] = unit_temp
print("added unit column to military yearly table.")
#yearly_df.to_csv(output_path + "test.csv",encoding='utf-8-sig')
# add "Unit" column to the range_df
count = 0 # count number of rows
range_df["Unit"] = None # create a new empty column "Unit"
unit_temp = []
for row in range_df['level1'].values.tolist():
if row == "村民纠纷 Number of Civil Mediations":
unit_temp.append("解决件数 Number of Resolved Mediations")
elif row == "共产党员 CCP Membership":
unit_temp.append("人数 Number of People")
elif row == "阶级成分 Class Status":
unit_temp.append("户数 Number of Households")
elif row == "入伍 Military Enlistment":
unit_temp.append("人数 Number of People")
elif row == "新党员 New CCP Membership":
unit_temp.append("人数 Number of People")
elif row == "刑事案件 Number of Reported Crimes":
unit_temp.append("发生件数 Number of Cases Happened")
else: # set default unit to number of people,should not work
unit_temp.append("人数 Number of People")
range_df["Unit"] = unit_temp
print("added unit column to population range_df table.")
#range_df.to_csv(output_path + "test.csv",encoding='utf-8-sig')
# transfer yearly_df to dictionary
yearly_data = {}
for column in yearly_df.columns:
yearly_data[column] = yearly_df[column].values.tolist()
# transfer range_df to dictionary
range_data = {}
for column in range_df.columns:
range_data[column] = range_df[column].values.tolist()
# merge yearly_df and range_df into df_yearly_range
yearly_and_range = {
'村志代码 Gazetteer Code': [],
'level1': [],
'level2': [],
'Start Year': [],
'End Year': [],
'Data': [],
'Unit': []}
print("process {} records in {} file".format(len(yearly_data['村志代码 Gazetteer Code']), yearly_file))
# select and store not null records at yearly
for i in range(len(yearly_data['村志代码 Gazetteer Code'])):
for year in range(1949, 2020):
# skip null records
if math.isnan(yearly_data[str(year)][i]):
continue
# store gazetteer code, categories, unit
for key in ['村志代码 Gazetteer Code', 'level1', 'level2', 'Unit']:
yearly_and_range[key].append(yearly_data[key][i])
# store data
yearly_and_range['Data'].append(yearly_data[str(year)][i])
# store start year, end year
yearly_and_range['Start Year'].append(year)
yearly_and_range['End Year'].append(year)
print("process {} records in {} file".format(len(range_data['村志代码 Gazetteer Code']), range_file))
# store range records
for i in range(len(range_data['村志代码 Gazetteer Code'])):
# store gazetteer code, categories, unit, start year, end year, data
for key in ['村志代码 Gazetteer Code', 'level1', 'level2', 'Start Year', 'End Year', 'Data', 'Unit']:
yearly_and_range[key].append(range_data[key][i])
# create df stores yearly and range data
yearly_and_range_df = pd.DataFrame(yearly_and_range)
# --- append category id ---
# group by categories
groupby_categories = yearly_and_range_df.groupby(['level1', 'level2'])
population_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 3 11:30:03 2018
@author: gary.allison
These routines take the pre-processed injection results and try to match
API numbers
"""
import pandas as pd
import numpy as np
import pandas.api.types as ptypes
from processInjectionInput import processAllFiles
##### --------------------------------------------------
#### Input file definitions
##### --------------------------------------------------
# set data dirs for input files and for resulting output files
datadir = './sources/'
outdir = './out/'
indir = datadir+'OH_injection/'
### metadata sources ###
SWDfn = indir+'Copy of SWD locations - July_2018.xls'
ODNR_permit_pickle = outdir+'ODNR_permit.pkl'
ODNR_injection_pickle = outdir+'ODNR_injection.pkl'
xlatefn = 'xlateAPI.txt'
xlate_excel = 'xlateAPI.xls'
pre_proc_out = outdir+'injection_tall_pre.csv'
inj_meta = outdir+'injection_meta_list.csv'
tempf = outdir+'temp.csv'
### --------------------------------------------------------------
def getTallSet(fn=pre_proc_out):
return pd.read_csv(fn)
ppout = pd.read_csv(pre_proc_out)
def prepInjData(fn=pre_proc_out):
ppout = pd.read_csv(pre_proc_out)
ppAPI = ppout.groupby(['API10'],as_index=False)['CompanyName'].last()
assert len(ppAPI) == len(ppAPI.API10.unique())
return ppAPI
def prepSWD(fn=SWDfn):
SWD_df = pd.read_excel(fn)
SWD_df['API10'] = SWD_df['API #'].astype('str').str[0:10]
SWD_df.API10 = SWD_df.API10.astype('str')
# note that lat/lon are reversed in the file, so I fix in the columns
SWD_df.columns = ['API','Owner','WellName','County','Township','Longitude','Latitude','WellStatus','API10']
assert len(SWD_df) == len(SWD_df.API10.unique())
return SWD_df
def prepODNRinjList(fn=ODNR_injection_pickle):
ODNRi = pd.read_pickle(fn)
#print(f'ODNRi col: {ODNRi.columns}')
assert len(ODNRi) == len(ODNRi.API10.unique())
return ODNRi
def prepODNRPermitList(fn=ODNR_permit_pickle):
ODNRp = pd.read_pickle(fn)
#print(ODNRp.columns)
cols = ['API','County','Owner', 'Township', 'PermitDate', 'WellName',
'WellNumber', 'Latitude', 'Longitude', 'API10']
ODNRp.columns = cols
ODNRp = ODNRp.groupby('API10',as_index=False)['API','County','Owner',
'Township', 'PermitDate', 'WellName',
'WellNumber', 'Latitude', 'Longitude'].last()
#print(f'Len of ODNRp = {len(ODNRp)}')
#assert len(ODNRp) == len(ODNRp.API10.unique())
return ODNRp
def joinMetaWithInjAPI(metadf,injdf,sourcename):
mg = pd.merge(metadf,injdf,how='outer',on='API10',indicator=True,validate='1:1')
mg.sort_values(by='API10').to_csv(tempf)
match = mg[mg._merge=='both'].copy()
match = match.drop('_merge',axis=1)
leftover = mg[mg._merge=='right_only'].copy()
leftover = leftover.filter(['API10','CompanyName'],axis=1)
match['meta_source'] = sourcename
print(f'input={len(injdf)}. For {sourcename}: matched={len(match)}, unmatched={len(leftover)}')
return match,leftover
def makeWholeSet():
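# Cascade matching: try the SWD location list first, then the ODNR injection scrape, then the ODNR permit scrape; anything left over is flagged "No match"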
ppAPI = prepInjData()
swd = prepSWD()
ODNRi = prepODNRinjList()
ODNRp = prepODNRPermitList()
match1,leftover1 = joinMetaWithInjAPI(swd,ppAPI,'SWD-july2018')
match2,leftover2 = joinMetaWithInjAPI(ODNRi,leftover1,'ODNR Injection scrape')
match3,leftover3 = joinMetaWithInjAPI(ODNRp,leftover2,'ODNR Permit scrape')
leftover3['meta_source'] = 'No match'
wholeset = pd.concat([match1,match2,match3,leftover3],sort=True)
wholeset.sort_values(by='API10').to_csv(inj_meta)
return wholeset
def addLatLonToTall():
whole = makeWholeSet()
whole = whole.filter(['API10','Latitude','Longitude'],axis=1)
whole.Latitude = whole.Latitude.fillna(40)
whole.Latitude = whole.Latitude.fillna(-83)
ppout = getTallSet()
mg = pd.merge(whole,ppout,on='API10',how='right',validate='1:m')
return mg
def getNoMatchSet():
whole = makeWholeSet()
return whole[whole.meta_source=='No match'].copy()
def makeXlateSpreadsheet(fn=xlatefn):
xlate = pd.read_csv(fn,sep='|')
print(len(xlate))
xlate.to_excel(xlate_excel)
def getNoMatchRecLen():
nm = getNoMatchSet()
ppout = | pd.read_csv(pre_proc_out) | pandas.read_csv |
import numpy as np
import pandas as pd
# import xarray as xr
# import xskillscore
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.losses import Loss
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
plt.close("all")
from scoringRules import es_sample, crps_sample
from igep_models_all_tem_noplot import igep
import tensorflow.compat.v1 as tfv
tfv.disable_v2_behavior()
DIM = 5 # dimension of target values
dist_samples = pd.read_csv('/home/chen_jieyu/IGEP/dist_5samples.csv', header = None)
# Read data
path = '/home/chen_jieyu/IGEP/ens_fc_t2m_complete.feather'
t2m_ens_complete = pd.read_feather(path)
path_add = '/home/chen_jieyu/IGEP/tem_additional_predictors.feather'
t2m_add_complete = pd.read_feather(path_add)
callback = tf.keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0.002, patience = 3, restore_best_weights = True)
# loop
x = 100
for k in range(x):
station_sample = dist_samples.iloc[k,]
ens_sample = t2m_ens_complete[t2m_ens_complete['station'].isin(station_sample)]
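# keep only the dates on which all DIM sampled stations report data, so each date forms a complete multivariate target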
dateobs_count = ens_sample.groupby('date')['date'].count()
dates = dateobs_count.index
used_dates = dates[dateobs_count == DIM]
used_ens_sample = ens_sample[ens_sample['date'].isin(used_dates)]
add_sample = t2m_add_complete[t2m_add_complete['station'].isin(station_sample)]
used_add_sample = add_sample[add_sample['date'].isin(used_dates)]
# LOAD DATA
# t2m data
t2m_obs = used_ens_sample['obs']
t2m_obs.index = used_ens_sample['date']
data_obs = t2m_obs
# set initial training and test dates
train_dateindex = ((t2m_obs.index.year != 2016) & (t2m_obs.index.year != 2015))
val_dateindex = (t2m_obs.index.year == 2015)
test_dateindex = (t2m_obs.index.year == 2016)
# Predictions
t2m_ens = used_ens_sample.iloc[:,3:53]
t2m_ens.index = used_ens_sample['date']
data_ens = t2m_ens
# added predictors
add_dim = 37
t2m_add = used_add_sample.loc[:,["d2m_mean","d2m_var",
"q_pl850_mean","q_pl850_var",
"tcc_mean","tcc_var",
"u_pl850_mean","u_pl850_var",
"v_pl850_mean","v_pl850_var",
"sshf_mean","sshf_var",
"slhf_mean","slhf_var",
"u10_mean","u10_var",
"v10_mean","v10_var",
"cape_mean","cape_var",
"sp_mean","sp_var",
"u_pl500_mean","u_pl500_var",
"v_pl500_mean","v_pl500_var",
"gh_pl500_mean","gh_pl500_var",
"ssr_mean","ssr_var",
"str_mean","str_var",
"lat","lon","alt","orog","sin_yday"]]
t2m_add.index = used_add_sample['date']
data_add = t2m_add
# SPLIT DATA
# get training and test data
obser = data_obs.copy()
pred = data_ens.copy()
addpre = data_add.copy()
dim = DIM
######### standardization
scaler = preprocessing.StandardScaler().fit(obser[train_dateindex].values.reshape(-1,1))
stand_obs = scaler.transform(obser.values.reshape(-1,1)).reshape(-1)
obser.iloc[:] = stand_obs
for i in range(pred.shape[1]):
pred.iloc[:,i] = scaler.transform(pred.iloc[:,i].values.reshape(-1,1))
ens_mu = pred.mean(axis=1)
ens_sigma = pred.var(axis=1)
ens_max = pred.max(axis=1)
ens_min = pred.min(axis=1)
ens_spread = ens_max - ens_min
for i in range(addpre.shape[1]-1):
scaler_i = preprocessing.StandardScaler().fit(addpre.iloc[train_dateindex,i].values.reshape(-1,1))
addpre.iloc[:,i] = scaler_i.transform(addpre.iloc[:,i].values.reshape(-1,1))
add_pre_mu = addpre.loc[:,["d2m_mean","q_pl850_mean","tcc_mean","u_pl850_mean","v_pl850_mean",
"sshf_mean","slhf_mean","u10_mean","v10_mean","cape_mean","sp_mean",
"u_pl500_mean","v_pl500_mean","gh_pl500_mean","ssr_mean","str_mean"]]
add_pre_sigma = addpre.loc[:,["d2m_var","q_pl850_var","tcc_var","u_pl850_var","v_pl850_var",
"sshf_var","slhf_var","u10_var","v10_var","cape_var","sp_var",
"u_pl500_var","v_pl500_var","gh_pl500_var","ssr_var","str_var"]]
n_add = 16
# Inputs: three tensors per cluster -- mean-type predictors, spread/variance-type predictors, and the full predictor set -- each shaped (days, DIM stations, features)
x_train_m323 = [np.concatenate((ens_mu[train_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[train_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[train_dateindex].values.reshape((-1, dim, 1)),
ens_max[train_dateindex].values.reshape((-1, dim, 1)),
ens_min[train_dateindex].values.reshape((-1, dim, 1)),
ens_spread[train_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[train_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[train_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[train_dateindex].values.reshape((-1, dim, 1)),
ens_max[train_dateindex].values.reshape((-1, dim, 1)),
ens_min[train_dateindex].values.reshape((-1, dim, 1)),
ens_spread[train_dateindex].values.reshape((-1, dim, 1)),
addpre[train_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
x_val_m323 = [np.concatenate((ens_mu[val_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[val_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[val_dateindex].values.reshape((-1, dim, 1)),
ens_max[val_dateindex].values.reshape((-1, dim, 1)),
ens_min[val_dateindex].values.reshape((-1, dim, 1)),
ens_spread[val_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[val_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[val_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[val_dateindex].values.reshape((-1, dim, 1)),
ens_max[val_dateindex].values.reshape((-1, dim, 1)),
ens_min[val_dateindex].values.reshape((-1, dim, 1)),
ens_spread[val_dateindex].values.reshape((-1, dim, 1)),
addpre[val_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
x_test_m323 = [np.concatenate((ens_mu[test_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[test_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[test_dateindex].values.reshape((-1, dim, 1)),
ens_max[test_dateindex].values.reshape((-1, dim, 1)),
ens_min[test_dateindex].values.reshape((-1, dim, 1)),
ens_spread[test_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[test_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[test_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[test_dateindex].values.reshape((-1, dim, 1)),
ens_max[test_dateindex].values.reshape((-1, dim, 1)),
ens_min[test_dateindex].values.reshape((-1, dim, 1)),
ens_spread[test_dateindex].values.reshape((-1, dim, 1)),
addpre[test_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
y_train = obser[train_dateindex].values.reshape((-1, dim, 1))
y_val = obser[val_dateindex].values.reshape((-1, dim, 1))
y_test = obser[test_dateindex].values.reshape((-1, dim, 1))
y_train_tmp = y_train
y_val_tmp = y_val
y_test_tmp = y_test
testy = data_obs[test_dateindex]
# MODEL
BATCH_SIZE = 64
LATENT_DIST = "uniform" # or normal # family of latent varaible distributions
DIM_LATENT = 20 # number of latent variables
learning_rate = 0.01
EPOCHS = 50
N_SAMPLES_TRAIN = 50 # number of samples drawn during training
N_SAMPLES_TEST = 100
VERBOSE = 1
n_layers = 2
n_nodes = 25
ens_m3_output_combine_l = pd.DataFrame()
ens_m3_output_combine_s = pd.DataFrame()
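# train five independent IGEP models per station cluster and pool their predictive samples (all 100, plus the first 10 of each) into combined ensembles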
for loop in range(5):
tfv.reset_default_graph()
# initialize model
mdl_m3 = igep(dim_out = DIM,
dim_in_mean = x_train_m323[0].shape[-1],
dim_in_std = x_train_m323[1].shape[-1],
dim_in_features = x_train_m323[2].shape[-1],
dim_latent = DIM_LATENT,
n_samples_train = N_SAMPLES_TRAIN,
layer_number = n_layers,
nodes_number = n_nodes,
model_type = 326,
latent_dist = LATENT_DIST)
#% FIT
mdl_m3.fit(x = x_train_m323,
y = y_train_tmp,
batch_size = BATCH_SIZE,
epochs = EPOCHS,
verbose = VERBOSE,
callbacks = [callback],
validation_split = 0.0,
validation_data = (x_val_m323, y_val_tmp),
sample_weight = None,
learningrate = learning_rate)
# predict and append to list
S_m3 = []
S_m3.append(mdl_m3.predict(x_test_m323, N_SAMPLES_TEST))
pre_dat = np.concatenate(S_m3, axis = 0)
fcst = scaler.inverse_transform(np.reshape(pre_dat, (pre_dat.shape[0]*pre_dat.shape[1],-1)))
ens_m3_output = pd.DataFrame(fcst, index=testy.index)
ens_m3_output_combine_l = pd.concat([ens_m3_output_combine_l, ens_m3_output], axis=1)
ens_m3_output_combine_s = pd.concat([ens_m3_output_combine_s, ens_m3_output.iloc[:, :10] ], axis=1)
ens_m3_long_result = | pd.concat([testy, ens_m3_output_combine_l], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Module containing the operation for calculating the change in price in a pandas dataframe
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
from pandas import DataFrame as DataFrame_
from source.util import Assertor, Tracking, Debugger
from .operation import Operation
class RateOfChange(Operation):
"""
Implementation of Operation for calculating the rate of change in a vector
"""
@Tracking
def __init__(self, dataframe: dict, desc: str):
"""
Constructor / instantiating the class
Parameters
----------
dataframe : dict
dataframe, as a dictionary, holding the vector in which to calculate changes
desc : str
description of operation
"""
self.name = self.__class__.__name__
Assertor.assert_data_types([dataframe, desc], [dict, str])
super().__init__(name=self.name, desc="id: {}".format(desc))
self.dataframe = dataframe
@Debugger
def run(self):
"""
method for running the operation
"""
change = DataFrame_(self.dataframe).iloc[:, -1].astype(str).str.replace(
u"\xa0", "").str.replace(" kr", "").str.replace(" ", "")
final_change = | DataFrame_.from_dict(self.dataframe) | pandas.DataFrame.from_dict |
#! /bin/env python3
### This module is designed to identify the genomic data files in our laboratory's data directories
##by <NAME>
## April 7, 2015, script_version 0.6
script_version = 0.13
script_subversion = 22
##Note: Need to test the ability to link read files to the assembly, or inventory them independently
#pylint: disable=global-statement, broad-except
import re
import os
import pandas as pd
from Bio import SeqIO
import sys
import gzip
import zlib
import stat
import time
from shutil import copyfile
from collections import defaultdict
import functools
import urllib.request
import utilities
import seq_utilities
import NGS_data_utilities
# import seq_utilities
_verbose = False ##Set to True by Debug
Repository = '' #This must be set either by the user or from the settings file
ASSEMBLY_DIR_REPO = 'assemblies'
READ_DIR_REPO = 'reads'
INVENTORY_FILE_BASE = 'inventory.tab'
READMAP_FILE = 'read_assembly_map.tab'
read_ext = '.fastq.gz'
# read454_Ext = '.sff'
my_file = __file__
if os.path.islink(my_file):
my_file = os.path.realpath(my_file)
SCRIPT_DIR, SCRIPT_NAME = os.path.split(my_file)
SCRIPT_DIR = os.path.abspath(SCRIPT_DIR)
############################
#### Functions for organizing files (e.g. interpreting names)
#########################
def getBaseID(sample_name,version):
return "{}_v{}".format(sample_name,version)
# Helper functions to improve the CLI for outside scripts
def default_list(destination_directory):
if os.path.isdir(destination_directory):
result = os.path.join(destination_directory,'genome_list.tab')
else:
result = None
return result
def placeAssembliesIntoDataFrame(argv,GO_settings=None,repository=None,rename_duplicateID=True,drop_duplicate_files=True,deep_search=True):
return place_WGS_records_into_dataframe(argv,GO_settings,repository,rename_duplicateID,drop_duplicate_files,is_reads=False,deep_search=deep_search)
def placeReadsIntoDataFrame(argv,GO_settings=None,repository=None,rename_duplicateID=True,drop_duplicate_files=True):
return place_WGS_records_into_dataframe(argv,GO_settings,repository,rename_duplicateID,drop_duplicate_files,is_reads=True)
def place_WGS_records_into_dataframe(argv,GO_settings=None,repository=None,rename_duplicateID=True,drop_duplicate_files=True,is_reads=False,deep_search=True):
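# Accepted invocations: (1) a single directory or table of genomes/reads, (2) a genome file plus a genome name, or (3) one or more isolate IDs to pull from the repository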
assert len(argv) > 1, "No arguments passed. Failure."
result = None
isolates = None
main_arg = argv[1]
if os.path.exists(main_arg):
if len(argv) == 3: ## Main file and a genome name
if os.path.isfile(main_arg):
genome_name = argv[2]
result = pd.DataFrame({'Lab_ID':[genome_name],'Filename':[main_arg]})
else:
print("usage: {} GenomeFile GenomeName".format(os.path.basename(argv[0])))
print("\tNot a file: {}".format(argv[1]))
print("\tFull path: {}".format(os.path.abspath(argv[1])))
elif len(argv) == 2: #a single argument pointing to a group of files
if os.path.isdir(main_arg): #assemble group list
print('## Scanning genome directory ##')
result = NGS_data_utilities.listReadFilesWithNames(main_arg) if is_reads else NGS_data_utilities.listGenomeFilesWithNames(main_arg,deep_search=deep_search)
print("## Finished scanning directory ## \n")
elif os.path.isfile(main_arg): #read group list
print("## Reading table ##")
df = pd.read_table(main_arg)
print("Table contains {} sequences to analyze".format(len(df)))
if "Filename" in df.columns:
print("\tUser provided assembly files")
result = df.copy()
elif "Lab_ID" in df.columns:
file_type = 'read' if is_reads else 'assembly'
print("\tCould not identify 'Filename' field. Pulling {} files from repository".format(file_type))
isolates = df['Lab_ID'].tolist()
else:
print("Cannot parse file. Please provide a tab-delimited table with headers 'Filename' and 'Lab_ID'")
##Leaves result as none
else:
print("Unable to interpret command line. Too many arguments")
else: #Finally, test if these are a list of isolates
print("The supplied argument is not a directory or file; assuming it is an isolate ID")
isolates = argv[1:]
if result is None and isolates is not None:
if repository is None:
settingDict = get_default_settings(GO_settings)
if settingDict is not None:
repository = settingDict['repository']
if repository is not None and os.path.isdir(repository):
inventory = InventoryReader(repository)
if inventory.valid:
if is_reads:
print("Error: ability to select reads by isolate ID is not supported. Contact developer.")
result = None
else:
gd,_ = inventory.getAssemblyRecords(isolates, ActiveOnly=True)
result= gd[NGS_data_utilities.dfHeaders]
else:
result = None
else:
print("Cannot find repository at {}:".format(repository))
if result is None or len(result)==0:
print("Unable to parse arguments")
else:
if drop_duplicate_files:
result = result.drop_duplicates() ##for those situations where the directory was indexed twice... no big deal
##Make sure no two sequence files use the same genome name (this is used to identify intermediate files -- mainly matters for debugging
if rename_duplicateID:
NGS_data_utilities.assignUniqueID(result)
return result
###########################
#### Functions for manipulating the inventory files
###########################
def forcedIntCasting(my_series):
return my_series.str.split('.').str.get(0).astype(int)
##alt strategy is to first cast to float
##Allow exceptions to rise to the next level... though maybe I could return None to indicate failure
##Test that each isolate has one and only one record
def recordsAreCompleteAndUnique(Records,isolates):
valid = True
for i in isolates:
valid = sum(Records['Lab_ID'] == i) == 1
if not valid:
break
return valid
##Active only should only be selected when reading only
def assemblySetup(assembly_file,relative_path=None,ValidOnly=False): ##TODO: the update file does not necessarily have all the fields of the assembly frame, which causes problems
try:
assembly_frame = pd.DataFrame(pd.read_table(assembly_file,dtype=str),copy=True) ##If we're not careful, it will interpret ints as floats and cause bugs
assembly_frame.dropna(how='all',inplace=True)
for c in inventoryHeaders:
if c not in assembly_frame.columns:
assembly_frame[c] = None ##None causes problems with type-casting, below
except IOError:
assembly_frame = pd.DataFrame(columns=inventoryHeaders)
if len(assembly_frame) > 0: ##Make filepaths absolute : all assembly files are in the repository
if relative_path is not None:
inventory_path_join = functools.partial(DeRelativizePath,relative_path)
for c in assFileKeys:
if c in assembly_frame.columns:
good_values = assembly_frame[c].notnull()
assembly_frame.loc[good_values,c] = assembly_frame[good_values][c].apply(inventory_path_join)
#Set index
assembly_frame['Version'] = assembly_frame['Version'].astype(int)
assembly_frame.set_index(['Lab_ID','Version'],drop=False,inplace=True)
#Make temporary changes that affect empty cells; use "valid" to revert them
frame_valid = assembly_frame.notnull()
assembly_frame['Invalid'] = assembly_frame['Invalid'].isin(['True','TRUE','1','1.0','yes','Yes','YES'])
assembly_frame['Active'] = (~assembly_frame['Active'].isin(['False','FALSE','0','0.0','no','No','NO'])) & (assembly_frame['Invalid'] != True)
if ValidOnly:
assembly_frame = assembly_frame[assembly_frame['Invalid'] != True]
assembly_frame['Gaps'] = ~assembly_frame['Gaps'].isin(['False','FALSE','No','NO','no','0'])
##Cast everything to the appropriate datatype
castAssemblyColumns(assembly_frame)
return assembly_frame, frame_valid
##Address this bug: https://github.com/pydata/pandas/issues/4094
def castAssemblyColumns(assembly_frame):
for _,row in inventoryHeadersFrame.iterrows():
field = row["Field"]
field_type = row['dtype']
if field in assembly_frame.columns:
try:
assembly_frame[field] = assembly_frame[field].astype(field_type)
except (ValueError,TypeError):
no_good = True
if field_type is int:
try:
assembly_frame[field] = forcedIntCasting(assembly_frame[field])
except:
no_good = True
else:
no_good =False
print("Forced floating points to ints for {}".format(field))
if no_good:
print("Failed to cast {} to {}".format(field,field_type))
##Active only should only be selected when reading only
def readSetup(read_file,relative_path=None,ValidOnly=False):
try:
read_frame = pd.DataFrame(pd.read_table(read_file,dtype=str),copy=True)
read_frame.dropna(how='all', inplace=True)
for c in readInventoryHeaders:
if c not in read_frame.columns:
read_frame[c] = None
except IOError:
read_frame = pd.DataFrame(columns=readInventoryHeaders)
if len(read_frame) > 0: #Make filepaths absolute: reads must be in GWA or have HTTP
if relative_path is not None:
read_path_join = functools.partial(DeRelativizePath,relative_path)
rf = read_frame
for c in readFileKeys + ['Original_Read1','Original_Read2']:
relative_reads = utilities.avoidItemsThatStartWith(rf,c,'http:') ##Anything that is not HTTP is relative
rf.loc[relative_reads,c] = rf[relative_reads][c].apply(read_path_join)
#Set index
try:
read_frame['Read_Set'] = read_frame['Read_Set'].astype(int)
except Exception:
try:
read_frame['Read_Set'] = forcedIntCasting(read_frame['Read_Set'])
except ValueError:
print("Failed to cast {} to {}".format('Read_Set',int))
read_frame.set_index(['Lab_ID','Read_Set'],drop=False,inplace=True)
#Make temporary changes that affect empty cells; use "valid" to revert them
frame_valid = read_frame.notnull()
read_frame['Invalid'] = read_frame['Invalid'].isin(['True','TRUE','1'])
if ValidOnly:
read_frame = read_frame[read_frame['Invalid'] != True]
return read_frame, frame_valid
def readAssemblyMapSetup(readmap_file):
readmap_frame = pd.read_table(readmap_file,dtype=str) if os.path.isfile(readmap_file) else pd.DataFrame(columns=readAssemblyMapHeaders)
##Cast everything to the appropriate datatype
for _,row in readAssemblyMapFrame.iterrows():
field = row["Field"]
field_type = row['dtype']
try:
readmap_frame[field] = readmap_frame[field].astype(field_type)
except ValueError:
no_good = True
if field_type is int:
try:
readmap_frame[field] = forcedIntCasting(readmap_frame[field])
except Exception:
no_good = True
else:
no_good = False
print("Forced floating points to ints for {}".format(field))
if no_good:
print("Failed to cast {} to {}".format(field,field_type))
return readmap_frame
def DeRelativizePath(start,path):
if path[0] != '/':
result = os.path.normpath(os.path.join(start,path))
else:
result = path
return result
def conditionalRelativePath(start,path):
if path[0] != '/':
raise ValueError("Must work with absolute path, not {}".format(path))
shared = os.path.commonprefix([start,path])
if len(shared) > 1: #Not just '/'
result = os.path.relpath(path,start)
else:
result = path #No shared path, so no value in making it relative
return result
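## Illustrative sketch (hypothetical paths): the two helpers are meant to round-trip inventory paths
## against the repository directory when loading (assemblySetup/readSetup) and saving (the *Packup functions).
# repo_dir = '/data/repository/assemblies'
# DeRelativizePath(repo_dir, 'M001/contigs.fasta') ##-> '/data/repository/assemblies/M001/contigs.fasta'
# conditionalRelativePath(repo_dir, '/data/repository/assemblies/M001/contigs.fasta') ##-> 'M001/contigs.fasta'
# conditionalRelativePath(repo_dir, '/elsewhere/reads.fastq') ##returned unchanged: nothing shared beyond '/'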
def assemblyPackup(assembly_frame,relative_path):
af = assembly_frame.copy()
# ass_rel_path = functools.partial(conditionalRelativePath,start=relative_path)
# is_str = functools.partial(isinstance,classinfo=str)
for c in assFileKeys:
if c in af.columns:
good_values = (af[c].notnull()) & (af[c] != '') & (af[c] != 'nan')
for g in good_values.index:
if good_values[g]:
try:
af.loc[g,c] = conditionalRelativePath(relative_path,af.loc[g,c])
except ValueError as e:
print("Error on {}, index {}".format(c,g))
print(e)
raise
return af
def readPackup(read_frame,relative_path):
rf = read_frame.copy()
# reads_rel_path = functools.partial(conditionalRelativePath,start=relative_path)
for c in readFileKeys:
relative_reads = utilities.avoidItemsThatStartWith(rf,c,'http:') ##Anything that is not HTTP is made relative
for r in relative_reads.index:
if relative_reads[r]:
rf.loc[r,c] = conditionalRelativePath(relative_path,rf.loc[r,c])
# temp = rf[relative_reads][c]
# rf.loc[relative_reads,c] = temp.apply(reads_rel_path)
return rf
############ Class Based Inventory Manager #######################
##This is basically a wrapper for a set of dataframes representing the inventory files that control the subdirectories in a repository
class Inventory:
#Why aren't these just global constants?
ASSEMBLY_DIR_REPO = 'assemblies'
READ_DIR_REPO = 'reads'
BAM_DIR_REPO = 'BAM'
INVENTORY_FILE_BASE = 'inventory.tab'
READMAP_FILE = 'read_assembly_map.tab'
def __init__(self,repository=None):
self.assembly_frame = self.read_frame = self.contig_frame = self.readmap_frame = None
self.assembly_valid = self.read_valid = None
if repository is None:
repository = get_default_settings(SETTING_PATH)['repository']
self.setRepository(repository)
self.valid = False ##Needs to activate repository by initiating reader or writer
self.readInvTemplate = pd.DataFrame(columns = readInventoryHeaders)
def setRepository(self,repository):
##Set paths
self.repository = os.path.abspath(repository)
self.assembly_directory = os.path.join(self.repository,self.ASSEMBLY_DIR_REPO)
self.read_directory = os.path.join(self.repository,self.READ_DIR_REPO)
self.assembly_file = os.path.join(self.assembly_directory,self.INVENTORY_FILE_BASE)
self.contig_file = utilities.appendToFilename(self.assembly_file,'_contigs')
self.read_file = os.path.join(self.read_directory,self.INVENTORY_FILE_BASE)
self.readmap_file = os.path.join(self.assembly_directory,self.READMAP_FILE)
self.BAM_directory = os.path.join(self.repository,self.BAM_DIR_REPO)
def repositoryExists(self):
result = True
result &= os.path.isdir(self.repository)
result &= os.path.isdir(self.assembly_directory)
result &= os.path.isdir(self.read_directory)
return result
def activateRepository(self,ValidOnly,r_verbose=True): #pylint: disable=attribute-defined-outside-init
result = False
if self.repositoryExists():
try:
if r_verbose or _verbose:
print("Activating repository at "+self.repository)
### Load the frames
self.assembly_frame, self.assembly_valid = assemblySetup(self.assembly_file,self.assembly_directory,ValidOnly=ValidOnly)
vprint('Found {} valid assemblies and {} invalid'.format(sum(~self.assembly_frame.Invalid),sum(self.assembly_frame.Invalid)))
## Read frame
self.read_frame, self.read_valid = readSetup(self.read_file,self.read_directory,ValidOnly=ValidOnly)
vprint('Found {} valid read sets and {} invalid'.format(sum(~self.read_frame.Invalid),sum(self.read_frame.Invalid)))
##Other frames
self.contig_frame = pd.read_table(self.contig_file,dtype=str) if os.path.isfile(self.contig_file) else | pd.DataFrame(columns=inventoryContigHeaders) | pandas.DataFrame |
from docx import Document
from docx.shared import Inches
from docx.enum.text import WD_BREAK
import pandas as pd
from copy import deepcopy
from massive_docx.util import delete_paragraph
class Base:
def _show_styles(self):
for style in self.template.styles:
print(style.name)
def _parse_template(self):
self.template = Document(self.template_name)
def _parse_mailing_list(self):
self.mailing_list = | pd.read_excel(self.mailing_list_name) | pandas.read_excel |
#! /usr/bin/env python
import argparse
import os
import pysam
import pandas as pd
import numpy as np
from scipy.optimize import nnls
import scipy.sparse
import multiprocessing as mp
from hashed_read_genome_array import HashedReadBAMGenomeArray, ReadKeyMapFactory, read_length_nmis, get_hashed_counts
from plastid.genomics.roitools import SegmentChain, positionlist_to_segments
import sys
from time import strftime
parser = argparse.ArgumentParser(description='Use linear regression to identify likely sites of translation. Regression will be performed for ORFs '
'defined by find_orfs_and_types.py using a metagene profile constructed from annotated CDSs. If '
'multiple ribosome profiling datasets are to be analyzed separately (e.g. if they were collected under '
'different drug treatments), then this program should be run separately for each, ideally in separate '
'subfolders indicated by SUBDIR.')
parser.add_argument('bamfiles', nargs='+', help='Path to transcriptome-aligned BAM file(s) for read data')
parser.add_argument('--subdir', default=os.path.curdir,
help='Convenience argument when dealing with multiple datasets. In such a case, set SUBDIR to an appropriate name (e.g. HARR, '
'CHX) to avoid file conflicts. (Default: current directory)')
parser.add_argument('--restrictbystarts', nargs='+',
help='Subdirectory/subdirectories or filename(s) containing regression output to use to restrict ORFs for regression. If a '
'directory or list of directories, file(s) of name REGRESSFILE (regression.h5 by default) will be searched for within them. '
'For use to restrict regression on e.g. CHX or no-drug data based only on positive hits from e.g. HARR or LTM data. '
'Value(s) of MINWSTART indicate the minimum W statistic to require. If multiple directories/files are provided, start '
'sites will be taken from their union.')
parser.add_argument('--minwstart', type=float, nargs='+', default=[0],
help='Minimum W_start statistic to require for regression output in RESTRICTBYSTARTS. If only one value is given, it will be '
'assumed to apply to all; if multiple values are given, the number of values must match the number of values provided for '
'RESTRICTBYSTARTS. Ignored if RESTRICTBYSTARTS not included. (Default: 0)')
parser.add_argument('--orfstore', default='orf.h5',
help='Path to pandas HDF store containing ORFs to regress; generated by find_orfs_and_types.py (Default: orf.h5)')
parser.add_argument('--inbed', default='transcripts.bed', help='Transcriptome BED-file (Default: transcripts.bed)')
parser.add_argument('--offsetfile', default='offsets.txt',
help='Path to 2-column tab-delimited file with 5\' offsets for variable P-site mappings. First column indicates read length, '
'second column indicates offset to apply. Read lengths are calculated after trimming up to MAX5MIS 5\' mismatches. Accepted '
'read lengths are defined by those present in the first column of this file. If SUBDIR is set, this file is assumed to be '
'in that directory. (Default: offsets.txt)')
parser.add_argument('--max5mis', type=int, default=1, help='Maximum 5\' mismatches to trim. Reads with more than this number will be excluded.'
'(Default: 1)')
parser.add_argument('--regressfile', default='regression.h5',
help='Filename to which to output the table of regression scores for each ORF. Formatted as pandas HDF (tables generated include '
'"start_strengths", "orf_strengths", and "stop_strengths"). If SUBDIR is set, this file will be placed in that directory. '
'(Default: regression.h5)')
parser.add_argument('--startonly', action='store_true', help='Toggle for datasets collected in the presence of initiation inhibitor (e.g. HARR, '
'LTM). If selected, "stop_strengths" will not be calculated or saved.')
parser.add_argument('--startrange', type=int, nargs=2, default=[1, 50],
help='Region around start codon (in codons) to model explicitly. Ignored if reading metagene from file (Default: 1 50, meaning '
'one full codon before the start is modeled, as are the start codon and the 49 codons following it).')
parser.add_argument('--stoprange', type=int, nargs=2, default=[7, 0],
help='Region around stop codon (in codons) to model explicitly. Ignored if reading metagene from file (Default: 7 0, meaning '
'seven full codons before and including the stop are modeled, but none after).')
parser.add_argument('--mincdsreads', type=int, default=64,
help='Minimum number of reads required within the body of the CDS (and any surrounding nucleotides indicated by STARTRANGE or '
'STOPRANGE) for it to be included in the metagene. Ignored if reading metagene from file (Default: 64).')
parser.add_argument('--startcount', type=int, default=0,
help='Minimum reads at putative translation initiation codon. Useful to reduce computational burden by only considering ORFs '
'with e.g. at least 1 read at the start. (Default: 0)')
parser.add_argument('--metagenefile', default='metagene.txt',
help='File to save metagene profile, OR if the file already exists, it will be used as the input metagene. Formatted as '
'tab-delimited text, with position, readlength, value, and type ("START", "CDS", or "STOP"). If SUBDIR is set, this file '
'will be placed in that directory. (Default: metagene.txt)')
parser.add_argument('--noregress', action='store_true', help='Only generate a metagene (i.e. do not perform any regressions)')
parser.add_argument('--exclude', nargs='+', help='Names of transcript families (tfams) to exclude from analysis due to excessive computational time '
'or memory footprint (e.g. TTN can be so large that the regression never finishes).')
parser.add_argument('-v', '--verbose', action='count', help='Output a log of progress and timing (to stdout). Repeat for higher verbosity level.')
parser.add_argument('-p', '--numproc', type=int, default=1, help='Number of processes to run. Defaults to 1 but more recommended if available.')
parser.add_argument('-f', '--force', action='store_true',
help='Force file overwrite. This will overwrite both METAGENEFILE and REGRESSFILE, if they exist. To overwrite only REGRESSFILE '
'(and not the METAGENEFILE), do not invoke this option but simply delete REGRESSFILE.')
opts = parser.parse_args()
offsetfilename = os.path.join(opts.subdir, opts.offsetfile)
metafilename = os.path.join(opts.subdir, opts.metagenefile)
regressfilename = os.path.join(opts.subdir, opts.regressfile)
if not opts.force:
if os.path.exists(regressfilename):
if os.path.exists(metafilename):
raise IOError('%s exists; use --force to overwrite (will also recalculate metagene and overwrite %s)' % (regressfilename, metafilename))
raise IOError('%s exists; use --force to overwrite' % regressfilename)
restrictbystartfilenames = []
if opts.restrictbystarts:
if len(opts.restrictbystarts) > 1 and len(opts.minwstart) == 1:
opts.minwstart *= len(opts.restrictbystarts) # expand the list to the same number of arguments
if len(opts.minwstart) != len(opts.restrictbystarts):
raise ValueError('--minwstart must be given same number of values as --restrictbystarts, or one value for all')
for restrictbystart in opts.restrictbystarts:
if os.path.isfile(restrictbystart):
restrictbystartfilenames.append(restrictbystart)
elif os.path.isdir(restrictbystart) and os.path.isfile(os.path.join(restrictbystart, opts.regressfile)):
restrictbystartfilenames.append(os.path.join(restrictbystart, opts.regressfile))
else:
raise IOError('Regression file/directory %s not found' % restrictbystart)
if opts.verbose:
sys.stdout.write(' '.join(sys.argv) + '\n')
def logprint(nextstr):
sys.stdout.write('[%s] %s\n' % (strftime('%Y-%m-%d %H:%M:%S'), nextstr))
sys.stdout.flush()
log_lock = mp.Lock()
rdlens = []
Pdict = {}
with open(offsetfilename, 'rU') as infile:
for line in infile:
ls = line.strip().split()
rdlen = int(ls[0])
for nmis in range(opts.max5mis+1):
Pdict[(rdlen, nmis)] = int(ls[1])+nmis # e.g. if nmis == 1, offset as though the read were missing that base entirely
rdlens.append(rdlen)
# Pdict = {(int(ls[0]), nmis): int(ls[1])+nmis for ls in [line.strip().split() for line in infile] for nmis in range(opts.max5mis+1)}
# Pdict = {(ls[0], nmis): ls[1] for ls in [line.strip().split() for line in infile] if opts.maxrdlen >= ls[0] >= opts.minrdlen
# for nmis in range(opts.max5mis+1)}
rdlens.sort()
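# For reference, a hypothetical offsets.txt consistent with the parsing above would contain two
# tab-delimited columns (read length, 5' P-site offset), e.g.:
# 28	12
# 29	12
# 30	13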
# hash transcripts by ID for easy reference later
with open(opts.inbed, 'rU') as inbed:
bedlinedict = {line.split()[3]: line for line in inbed}
def _get_annotated_counts_by_chrom(chrom_to_do):
"""Accumulate counts from annotated CDSs into a metagene profile. Only the longest CDS in each transcript family will be included, and only if it
meets the minimum number-of-reads requirement. Reads are normalized by gene, so every gene included contributes equally to the final metagene."""
found_cds = pd.read_hdf(opts.orfstore, 'all_orfs', mode='r',
where="chrom == '%s' and orftype == 'annotated' and tstop > 0 and tcoord > %d and AAlen > %d"
% (chrom_to_do, -startnt[0], min_AAlen),
columns=['orfname', 'tfam', 'tid', 'tcoord', 'tstop', 'AAlen']) \
.sort_values('AAlen', ascending=False).drop_duplicates('tfam') # use the longest annotated CDS in each transcript family
num_cds_incl = 0 # number of CDSs included from this chromosome
startprof = np.zeros((len(rdlens), startlen))
cdsprof = np.zeros((len(rdlens), 3))
stopprof = np.zeros((len(rdlens), stoplen))
inbams = [pysam.Samfile(infile, 'rb') for infile in opts.bamfiles]
gnd = HashedReadBAMGenomeArray(inbams, ReadKeyMapFactory(Pdict, read_length_nmis))
for (tid, tcoord, tstop) in found_cds[['tid', 'tcoord', 'tstop']].itertuples(False):
curr_trans = SegmentChain.from_bed(bedlinedict[tid])
tlen = curr_trans.get_length()
if tlen >= tstop + stopnt[1]: # need to guarantee that the 3' UTR is sufficiently long
curr_hashed_counts = get_hashed_counts(curr_trans, gnd)
cdslen = tstop+stopnt[1]-tcoord-startnt[0] # cds length, plus the extra bases...
curr_counts = np.zeros((len(rdlens), cdslen))
for (i, rdlen) in enumerate(rdlens):
for nmis in range(opts.max5mis+1):
curr_counts[i, :] += curr_hashed_counts[(rdlen, nmis)][tcoord+startnt[0]:tstop+stopnt[1]]
# curr_counts is limited to the CDS plus any extra requested nucleotides on either side
if curr_counts.sum() >= opts.mincdsreads:
curr_counts /= curr_counts.mean() # normalize by mean of counts across all readlengths and positions within the CDS
startprof += curr_counts[:, :startlen]
cdsprof += curr_counts[:, startlen:cdslen-stoplen].reshape((len(rdlens), -1, 3)).mean(1)
stopprof += curr_counts[:, cdslen-stoplen:cdslen]
num_cds_incl += 1
for inbam in inbams:
inbam.close()
return startprof, cdsprof, stopprof, num_cds_incl
def _orf_profile(orflen):
"""Generate a profile for an ORF based on the metagene profile
Parameters
----------
orflen : int
Number of nucleotides in the ORF, including the start and stop codons
Returns
-------
np.ndarray<float>
The expected profile for the ORF. Number of rows will match the number of rows in the metagene profile. Number of columns will be
orflen + stopnt[1] - startnt[0]
"""
assert orflen % 3 == 0
assert orflen > 0
short_stop = 9
if orflen >= startnt[1]-stopnt[0]: # long enough to include everything
return np.hstack((startprof, np.tile(cdsprof, (orflen-startnt[1]+stopnt[0])/3), stopprof))
elif orflen >= startnt[1]+short_stop:
return np.hstack((startprof, stopprof[:, startnt[1]-orflen-stopnt[1]:]))
elif orflen >= short_stop:
return np.hstack((startprof[:, :orflen-short_stop-startnt[0]], stopprof[:, -short_stop-stopnt[1]:]))
else: # very short!
return np.hstack((startprof[:, :3-startnt[0]], stopprof[:, 3-orflen-stopnt[0]:]))
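# Sanity-check sketch (hypothetical ORF length; usable once the metagene profiles startprof/cdsprof/stopprof
# are available): every branch above should return a profile spanning orflen + stopnt[1] - startnt[0] columns,
# as stated in the docstring.
# test_orflen = 300 # a 100-codon ORF including start and stop
# assert _orf_profile(test_orflen).shape == (len(rdlens), test_orflen + stopnt[1] - startnt[0])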
if opts.startonly:
failure_return = (pd.DataFrame(), pd.DataFrame())
else:
failure_return = (pd.DataFrame(), pd.DataFrame(), pd.DataFrame())
def _regress_tfam(orf_set, gnd):
"""Performs non-negative least squares regression on all of the ORFs in a transcript family, using profiles constructed via _orf_profile()
Also calculates Wald statistics for each orf and start codon, and for each stop codon if opts.startonly is False"""
tfam = orf_set['tfam'].iat[0]
strand = orf_set['strand'].iat[0]
chrom = orf_set['chrom'].iat[0]
tids = orf_set['tid'].drop_duplicates().tolist()
all_tfam_genpos = set()
tid_genpos = {}
tlens = {}
for (i, tid) in enumerate(tids):
currtrans = SegmentChain.from_bed(bedlinedict[tid])
curr_pos_set = currtrans.get_position_set()
tlens[tid] = len(curr_pos_set)
tid_genpos[tid] = curr_pos_set
all_tfam_genpos.update(curr_pos_set)
tfam_segs = SegmentChain(*positionlist_to_segments(chrom, strand, list(all_tfam_genpos)))
all_tfam_genpos = np.array(sorted(all_tfam_genpos))
if strand == '-':
all_tfam_genpos = all_tfam_genpos[::-1]
nnt = len(all_tfam_genpos)
tid_indices = {tid: np.flatnonzero(np.in1d(all_tfam_genpos, list(curr_tid_genpos), assume_unique=True))
for (tid, curr_tid_genpos) in tid_genpos.iteritems()}
hashed_counts = get_hashed_counts(tfam_segs, gnd)
counts = np.zeros((len(rdlens), nnt), dtype=np.float64) # even though they are integer-valued, will need to do float arithmetic
for (i, rdlen) in enumerate(rdlens):
for nmis in range(1+opts.max5mis):
counts[i, :] += hashed_counts[(rdlen, nmis)]
counts = counts.ravel()
if opts.startcount:
# Only include ORFS for which there is at least some minimum reads within one nucleotide of the start codon
offsetmat = np.tile(nnt*np.arange(len(rdlens)), 3) # offsets for each cond, expecting three positions to check for each
# try:
orf_set = orf_set[[(counts[(start_idxes.repeat(len(rdlens))+offsetmat)].sum() >= opts.startcount) for start_idxes in
[tid_indices[tid][tcoord-1:tcoord+2] for (tid, tcoord, tstop) in orf_set[['tid', 'tcoord', 'tstop']].itertuples(False)]]]
if orf_set.empty:
return failure_return
orf_strength_df = orf_set.sort_values('tcoord', ascending=False).drop_duplicates('orfname').reset_index(drop=True)
abort_set = orf_set.drop_duplicates('gcoord').copy()
abort_set['gstop'] = abort_set['gcoord'] # should maybe be +/-3, but then need to worry about splicing - and this is an easy flag
abort_set['tstop'] = abort_set['tcoord']+3 # stop after the first codon
abort_set['orfname'] = abort_set['gcoord'].apply(lambda x: '%s_%d_abort' % (tfam, x))
orf_strength_df = pd.concat((orf_strength_df, abort_set), ignore_index=True)
if not opts.startonly: # if marking full ORFs, include histop model
stop_set = orf_set.drop_duplicates('gstop').copy()
stop_set['gcoord'] = stop_set['gstop'] # this is an easy flag
stop_set['tcoord'] = stop_set['tstop'] # should probably be -3 nt, but this is another easy flag that distinguishes from abinit
stop_set['orfname'] = stop_set['gstop'].apply(lambda x: '%s_%d_stop' % (tfam, x))
orf_strength_df = pd.concat((orf_strength_df, stop_set), ignore_index=True)
orf_profs = []
indices = []
for (tid, tcoord, tstop) in orf_strength_df[['tid', 'tcoord', 'tstop']].itertuples(False):
if tcoord != tstop: # not a histop
tlen = tlens[tid]
if tcoord+startnt[0] < 0:
startadj = -startnt[0]-tcoord # number of nts to remove from the start due to short 5' UTR; guaranteed > 0
else:
startadj = 0
if tstop+stopnt[1] > tlen:
stopadj = tstop+stopnt[1]-tlen # number of nts to remove from the end due to short 3' UTR; guaranteed > 0
else:
stopadj = 0
curr_indices = tid_indices[tid][tcoord+startnt[0]+startadj:tstop+stopnt[1]-stopadj]
orf_profs.append(_orf_profile(tstop-tcoord)[:, startadj:tstop-tcoord+stopnt[1]-startnt[0]-stopadj].ravel())
else: # histop
curr_indices = tid_indices[tid][tstop-6:tstop]
orf_profs.append(stopprof[:, -6:].ravel())
indices.append(np.concatenate([nnt*i+curr_indices for i in xrange(len(rdlens))]))
# need to tile the indices for each read length
if len(indices[-1]) != len(orf_profs[-1]):
raise AssertionError('ORF length does not match index length')
orf_matrix = scipy.sparse.csc_matrix((np.concatenate(orf_profs),
np.concatenate(indices),
np.cumsum([0]+[len(curr_indices) for curr_indices in indices])),
shape=(nnt*len(rdlens), len(orf_strength_df)))
# better to make it a sparse matrix, even though nnls requires a dense matrix, because of linear algebra to come
nonzero_orfs = np.flatnonzero(orf_matrix.T.dot(counts) > 0)
if len(nonzero_orfs) == 0: # no possibility of anything coming up
return failure_return
orf_matrix = orf_matrix[:, nonzero_orfs]
orf_strength_df = orf_strength_df.iloc[nonzero_orfs] # don't bother fitting ORFs with zero reads throughout their entire length
(orf_strs, resid) = nnls(orf_matrix.toarray(), counts)
min_str = 1e-6 # allow for machine rounding error
usable_orfs = orf_strs > min_str
if not usable_orfs.any():
return failure_return
orf_strength_df = orf_strength_df[usable_orfs]
orf_matrix = orf_matrix[:, usable_orfs] # remove entries for zero-strength ORFs or transcripts
orf_strs = orf_strs[usable_orfs]
orf_strength_df['orf_strength'] = orf_strs
covmat = resid*resid*np.linalg.inv(orf_matrix.T.dot(orf_matrix).toarray())/(nnt*len(rdlens)-len(orf_strength_df))
# homoscedastic version (assume equal variance at all positions)
# resids = counts-orf_matrix.dot(orf_strs)
# simple_covmat = np.linalg.inv(orf_matrix.T.dot(orf_matrix).toarray())
# covmat = simple_covmat.dot(orf_matrix.T.dot(scipy.sparse.dia_matrix((resids*resids, 0), (len(resids), len(resids))))
# .dot(orf_matrix).dot(simple_covmat))
# # heteroscedastic version (Eicker-Huber-White robust estimator)
orf_strength_df['W_orf'] = orf_strength_df['orf_strength']*orf_strength_df['orf_strength']/np.diag(covmat)
orf_strength_df.set_index('orfname', inplace=True)
elongating_orfs = ~(orf_strength_df['gstop'] == orf_strength_df['gcoord'])
if opts.startonly: # count abortive initiation events towards start strength in this case
include_starts = (orf_strength_df['tcoord'] != orf_strength_df['tstop'])
if not include_starts.any():
return failure_return # no need to keep going if there weren't any useful starts
gcoord_grps = orf_strength_df[include_starts].groupby('gcoord')
# even if we are willing to count abinit towards start strength, we certainly shouldn't count histop
covmat_starts = covmat[np.ix_(include_starts.values, include_starts.values)]
orf_strs_starts = orf_strs[include_starts.values]
else:
if not elongating_orfs.any():
return failure_return
gcoord_grps = orf_strength_df[elongating_orfs].groupby('gcoord')
covmat_starts = covmat[np.ix_(elongating_orfs.values, elongating_orfs.values)]
orf_strs_starts = orf_strs[elongating_orfs.values]
start_strength_df = pd.DataFrame.from_items([('tfam', tfam),
('chrom', orf_set['chrom'].iloc[0]),
('strand', orf_set['strand'].iloc[0]),
('codon', gcoord_grps['codon'].first()),
('start_strength', gcoord_grps['orf_strength'].aggregate(np.sum))])
start_strength_df['W_start'] = pd.Series({gcoord: orf_strs_starts[rownums].dot(np.linalg.inv(covmat_starts[np.ix_(rownums, rownums)]))
.dot(orf_strs_starts[rownums]) for (gcoord, rownums) in gcoord_grps.indices.iteritems()})
if not opts.startonly:
# count histop towards the stop codon - but still exclude abinit
include_stops = (elongating_orfs | (orf_strength_df['tcoord'] == orf_strength_df['tstop']))
gstop_grps = orf_strength_df[include_stops].groupby('gstop')
covmat_stops = covmat[np.ix_(include_stops.values, include_stops.values)]
orf_strs_stops = orf_strs[include_stops.values]
stop_strength_df = pd.DataFrame.from_items([('tfam', tfam),
('chrom', orf_set['chrom'].iloc[0]),
('strand', orf_set['strand'].iloc[0]),
('stop_strength', gstop_grps['orf_strength'].aggregate(np.sum))])
stop_strength_df['W_stop'] = pd.Series({gstop: orf_strs_stops[rownums].dot(np.linalg.inv(covmat_stops[np.ix_(rownums, rownums)]))
.dot(orf_strs_stops[rownums]) for (gstop, rownums) in gstop_grps.indices.iteritems()})
# # nohistop
# gstop_grps = orf_strength_df[elongating_orfs].groupby('gstop')
# covmat_stops = covmat[np.ix_(elongating_orfs.values, elongating_orfs.values)]
# orf_strs_stops = orf_strs[elongating_orfs.values]
# stop_strength_df['stop_strength_nohistop'] = gstop_grps['orf_strength'].aggregate(np.sum)
# stop_strength_df['W_stop_nohistop'] = pd.Series({gstop:orf_strs_stops[rownums].dot(np.linalg.inv(covmat_stops[np.ix_(rownums,rownums)]))
# .dot(orf_strs_stops[rownums]) for (gstop, rownums) in gstop_grps.indices.iteritems()})
return orf_strength_df, start_strength_df, stop_strength_df
else:
return orf_strength_df, start_strength_df
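# Illustrative sketch of the grouped Wald statistic computed above: for the set of ORFs sharing a
# start codon (rows `rownums`), W_start = beta' * inv(Sigma) * beta, where beta holds the fitted
# NNLS strengths and Sigma is the matching block of the coefficient covariance matrix.
# Hypothetical two-ORF example:
# beta = np.array([2.0, 1.0])
# sigma = np.array([[0.5, 0.1], [0.1, 0.4]])
# w_start = beta.dot(np.linalg.inv(sigma)).dot(beta) # scalar; larger values indicate stronger starts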
def _regress_chrom(chrom_to_do):
"""Applies _regress_tfam() to all of the transcript families on a chromosome"""
chrom_orfs = pd.read_hdf(opts.orfstore, 'all_orfs', mode='r', where="chrom == %r and tstop > 0 and tcoord > 0" % chrom_to_do,
columns=['orfname', 'tfam', 'tid', 'tcoord', 'tstop', 'AAlen', 'chrom', 'gcoord', 'gstop', 'strand',
'codon', 'orftype', 'annot_start', 'annot_stop'])
# tcoord > 0 removes ORFs where the first codon is an NTG, to avoid an indexing error
# Those ORFs would never get called anyway since they couldn't possibly have any reads at their start codon
if opts.exclude:
chrom_orfs = chrom_orfs[~chrom_orfs['tfam'].isin(opts.exclude)]
if restrictbystartfilenames:
restrictedstarts = pd.DataFrame()
for (restrictbystart, minw) in zip(restrictbystartfilenames, opts.minwstart):
restrictedstarts = restrictedstarts.append(
pd.read_hdf(restrictbystart, 'start_strengths', mode='r', where="(chrom == %r) & (W_start > minw)" % chrom_to_do,
columns=['tfam', 'chrom', 'gcoord', 'strand']), ignore_index=True).drop_duplicates()
chrom_orfs = chrom_orfs.merge(restrictedstarts) # inner merge acts as a filter
if chrom_orfs.empty:
if opts.verbose > 1:
with log_lock:
logprint('No ORFs found on %s' % chrom_to_do)
return failure_return
inbams = [pysam.Samfile(infile, 'rb') for infile in opts.bamfiles]
gnd = HashedReadBAMGenomeArray(inbams, ReadKeyMapFactory(Pdict, read_length_nmis))
res = tuple([ | pd.concat(res_dfs) | pandas.concat |